repo_name | hexsha | file_path | code | apis | possible_versions
---|---|---|---|---|---|
Ankit-Kumar-Saini/Machine_Learning_Udacity | [
"00655d09ee2074f8f1bc899e90ae7291b6b0ee2d"
] | [
"Plagiarism Detection/source_sklearn/train.py"
] | [
"from __future__ import print_function\n\nimport argparse\nimport os\nimport pandas as pd\n\n# sklearn.externals.joblib is deprecated in 0.21 and will be removed in 0.23. \n# from sklearn.externals import joblib\n# Import joblib package directly\nimport joblib\nfrom sklearn.ensemble import RandomForestClassifier\n\n## TODO: Import any additional libraries you need to define a model\n\n\n# Provided model load function\ndef model_fn(model_dir):\n \"\"\"Load model from the model_dir. This is the same model that is saved\n in the main if statement.\n \"\"\"\n print(\"Loading model.\")\n \n # load using joblib\n model = joblib.load(os.path.join(model_dir, \"model.joblib\"))\n print(\"Done loading model.\")\n \n return model\n\n\n## TODO: Complete the main code\nif __name__ == '__main__':\n \n # All of the model parameters and training parameters are sent as arguments\n # when this script is executed, during a training job\n \n # Here we set up an argument parser to easily access the parameters\n parser = argparse.ArgumentParser()\n\n # SageMaker parameters, like the directories for training data and saving models; set automatically\n # Do not need to change\n parser.add_argument('--output-data-dir', type=str, default=os.environ['SM_OUTPUT_DATA_DIR'])\n parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])\n parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAIN'])\n \n ## TODO: Add any additional arguments that you will need to pass into your model\n parser.add_argument('--n_estimators', type = int, default = 100)\n parser.add_argument('--min_samples_split', type = int, default = 2)\n \n # args holds all passed-in arguments\n args = parser.parse_args()\n\n # Read in csv training file\n training_dir = args.data_dir\n train_data = pd.read_csv(os.path.join(training_dir, \"train.csv\"), header=None, names=None)\n\n # Labels are in the first column\n train_y = train_data.iloc[:,0]\n train_x = train_data.iloc[:,1:]\n \n \n ## --- Your code here --- ##\n \n\n ## TODO: Define a model \n model = RandomForestClassifier(n_estimators = args.n_estimators,\n min_samples_split = args.min_samples_split,\n random_state = 42)\n \n \n ## TODO: Train the model\n model.fit(train_x, train_y)\n \n \n ## --- End of your code --- ##\n \n\n # Save the trained model\n joblib.dump(model, os.path.join(args.model_dir, \"model.joblib\"))\n"
] | [
[
"sklearn.ensemble.RandomForestClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
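The first row's code is a scikit-learn training entry point written for SageMaker, which supplies the `SM_*` environment variables and hyperparameter flags at training time. As a rough illustration only, the sketch below shows one way the script could be exercised locally; the scratch paths and hyperparameter values are assumptions, not part of the dataset row.

```python
import os
import subprocess

# SageMaker injects these variables inside the training container; for a local
# dry run we point them at scratch directories (these paths are assumptions,
# and SM_CHANNEL_TRAIN must already contain train.csv).
env = dict(os.environ,
           SM_OUTPUT_DATA_DIR="/tmp/output",
           SM_MODEL_DIR="/tmp/model",
           SM_CHANNEL_TRAIN="/tmp/train")

# Hyperparameters are passed exactly as the argparse flags defined in train.py.
subprocess.run(
    ["python", "train.py",
     "--n_estimators", "200",
     "--min_samples_split", "4"],
    env=env, check=True)
```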
FedericoFontana/ray | [
"5a7feae15f8c74d5d196fea6697c1827008018f3"
] | [
"python/ray/rllib/models/action_dist.py"
] | [
"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom collections import namedtuple\nimport distutils.version\nimport numpy as np\n\nfrom ray.rllib.utils.annotations import override, DeveloperAPI\nfrom ray.rllib.utils import try_import_tf\n\ntf = try_import_tf()\n\nif tf:\n use_tf150_api = (distutils.version.LooseVersion(tf.VERSION) >=\n distutils.version.LooseVersion(\"1.5.0\"))\nelse:\n use_tf150_api = False\n\n\n@DeveloperAPI\nclass ActionDistribution(object):\n \"\"\"The policy action distribution of an agent.\n\n Args:\n inputs (Tensor): The input vector to compute samples from.\n \"\"\"\n\n @DeveloperAPI\n def __init__(self, inputs):\n self.inputs = inputs\n self.sample_op = self._build_sample_op()\n\n @DeveloperAPI\n def logp(self, x):\n \"\"\"The log-likelihood of the action distribution.\"\"\"\n raise NotImplementedError\n\n @DeveloperAPI\n def kl(self, other):\n \"\"\"The KL-divergence between two action distributions.\"\"\"\n raise NotImplementedError\n\n @DeveloperAPI\n def entropy(self):\n \"\"\"The entropy of the action distribution.\"\"\"\n raise NotImplementedError\n\n @DeveloperAPI\n def _build_sample_op(self):\n \"\"\"Implement this instead of sample(), to enable op reuse.\n\n This is needed since the sample op is non-deterministic and is shared\n between sample() and sampled_action_prob().\n \"\"\"\n raise NotImplementedError\n\n @DeveloperAPI\n def sample(self):\n \"\"\"Draw a sample from the action distribution.\"\"\"\n return self.sample_op\n\n @DeveloperAPI\n def sampled_action_prob(self):\n \"\"\"Returns the log probability of the sampled action.\"\"\"\n return tf.exp(self.logp(self.sample_op))\n\n\nclass Categorical(ActionDistribution):\n \"\"\"Categorical distribution for discrete action spaces.\"\"\"\n\n @override(ActionDistribution)\n def logp(self, x):\n return -tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=self.inputs, labels=x)\n\n @override(ActionDistribution)\n def entropy(self):\n if use_tf150_api:\n a0 = self.inputs - tf.reduce_max(\n self.inputs, reduction_indices=[1], keepdims=True)\n else:\n a0 = self.inputs - tf.reduce_max(\n self.inputs, reduction_indices=[1], keep_dims=True)\n ea0 = tf.exp(a0)\n if use_tf150_api:\n z0 = tf.reduce_sum(ea0, reduction_indices=[1], keepdims=True)\n else:\n z0 = tf.reduce_sum(ea0, reduction_indices=[1], keep_dims=True)\n p0 = ea0 / z0\n return tf.reduce_sum(p0 * (tf.log(z0) - a0), reduction_indices=[1])\n\n @override(ActionDistribution)\n def kl(self, other):\n if use_tf150_api:\n a0 = self.inputs - tf.reduce_max(\n self.inputs, reduction_indices=[1], keepdims=True)\n a1 = other.inputs - tf.reduce_max(\n other.inputs, reduction_indices=[1], keepdims=True)\n else:\n a0 = self.inputs - tf.reduce_max(\n self.inputs, reduction_indices=[1], keep_dims=True)\n a1 = other.inputs - tf.reduce_max(\n other.inputs, reduction_indices=[1], keep_dims=True)\n ea0 = tf.exp(a0)\n ea1 = tf.exp(a1)\n if use_tf150_api:\n z0 = tf.reduce_sum(ea0, reduction_indices=[1], keepdims=True)\n z1 = tf.reduce_sum(ea1, reduction_indices=[1], keepdims=True)\n else:\n z0 = tf.reduce_sum(ea0, reduction_indices=[1], keep_dims=True)\n z1 = tf.reduce_sum(ea1, reduction_indices=[1], keep_dims=True)\n p0 = ea0 / z0\n return tf.reduce_sum(\n p0 * (a0 - tf.log(z0) - a1 + tf.log(z1)), reduction_indices=[1])\n\n @override(ActionDistribution)\n def _build_sample_op(self):\n return tf.squeeze(tf.multinomial(self.inputs, 1), axis=1)\n\n\nclass MultiCategorical(ActionDistribution):\n 
\"\"\"Categorical distribution for discrete action spaces.\"\"\"\n\n def __init__(self, inputs):\n self.cats = [Categorical(input_) for input_ in inputs]\n self.sample_op = self._build_sample_op()\n\n def logp(self, actions):\n # If tensor is provided, unstack it into list\n if isinstance(actions, tf.Tensor):\n actions = tf.unstack(actions, axis=1)\n logps = tf.stack(\n [cat.logp(act) for cat, act in zip(self.cats, actions)])\n return tf.reduce_sum(logps, axis=0)\n\n def entropy(self):\n return tf.stack([cat.entropy() for cat in self.cats], axis=1)\n\n def kl(self, other):\n return [cat.kl(oth_cat) for cat, oth_cat in zip(self.cats, other.cats)]\n\n def _build_sample_op(self):\n return tf.stack([cat.sample() for cat in self.cats], axis=1)\n\n\nclass DiagGaussian(ActionDistribution):\n \"\"\"Action distribution where each vector element is a gaussian.\n\n The first half of the input vector defines the gaussian means, and the\n second half the gaussian standard deviations.\n \"\"\"\n\n def __init__(self, inputs):\n mean, log_std = tf.split(inputs, 2, axis=1)\n self.mean = mean\n self.log_std = log_std\n self.std = tf.exp(log_std)\n ActionDistribution.__init__(self, inputs)\n\n @override(ActionDistribution)\n def logp(self, x):\n return (-0.5 * tf.reduce_sum(\n tf.square((x - self.mean) / self.std), reduction_indices=[1]) -\n 0.5 * np.log(2.0 * np.pi) * tf.to_float(tf.shape(x)[1]) -\n tf.reduce_sum(self.log_std, reduction_indices=[1]))\n\n @override(ActionDistribution)\n def kl(self, other):\n assert isinstance(other, DiagGaussian)\n return tf.reduce_sum(\n other.log_std - self.log_std +\n (tf.square(self.std) + tf.square(self.mean - other.mean)) /\n (2.0 * tf.square(other.std)) - 0.5,\n reduction_indices=[1])\n\n @override(ActionDistribution)\n def entropy(self):\n return tf.reduce_sum(\n .5 * self.log_std + .5 * np.log(2.0 * np.pi * np.e),\n reduction_indices=[1])\n\n @override(ActionDistribution)\n def _build_sample_op(self):\n return self.mean + self.std * tf.random_normal(tf.shape(self.mean))\n\n\nclass Deterministic(ActionDistribution):\n \"\"\"Action distribution that returns the input values directly.\n\n This is similar to DiagGaussian with standard deviation zero.\n \"\"\"\n\n @override(ActionDistribution)\n def sampled_action_prob(self):\n return 1.0\n\n @override(ActionDistribution)\n def _build_sample_op(self):\n return self.inputs\n\n\nclass MultiActionDistribution(ActionDistribution):\n \"\"\"Action distribution that operates for list of actions.\n\n Args:\n inputs (Tensor list): A list of tensors from which to compute samples.\n \"\"\"\n\n def __init__(self, inputs, action_space, child_distributions, input_lens):\n self.input_lens = input_lens\n split_inputs = tf.split(inputs, self.input_lens, axis=1)\n child_list = []\n for i, distribution in enumerate(child_distributions):\n child_list.append(distribution(split_inputs[i]))\n self.child_distributions = child_list\n\n @override(ActionDistribution)\n def logp(self, x):\n split_indices = []\n for dist in self.child_distributions:\n if isinstance(dist, Categorical):\n split_indices.append(1)\n else:\n split_indices.append(tf.shape(dist.sample())[1])\n split_list = tf.split(x, split_indices, axis=1)\n for i, distribution in enumerate(self.child_distributions):\n # Remove extra categorical dimension\n if isinstance(distribution, Categorical):\n split_list[i] = tf.cast(\n tf.squeeze(split_list[i], axis=-1), tf.int32)\n log_list = np.asarray([\n distribution.logp(split_x) for distribution, split_x in zip(\n self.child_distributions, 
split_list)\n ])\n return np.sum(log_list)\n\n @override(ActionDistribution)\n def kl(self, other):\n kl_list = np.asarray([\n distribution.kl(other_distribution)\n for distribution, other_distribution in zip(\n self.child_distributions, other.child_distributions)\n ])\n return np.sum(kl_list)\n\n @override(ActionDistribution)\n def entropy(self):\n entropy_list = np.array(\n [s.entropy() for s in self.child_distributions])\n return np.sum(entropy_list)\n\n @override(ActionDistribution)\n def sample(self):\n return TupleActions([s.sample() for s in self.child_distributions])\n\n @override(ActionDistribution)\n def sampled_action_prob(self):\n p = self.child_distributions[0].sampled_action_prob()\n for c in self.child_distributions[1:]:\n p *= c.sampled_action_prob()\n return p\n\n\nTupleActions = namedtuple(\"TupleActions\", [\"batches\"])\n\n\nclass Dirichlet(ActionDistribution):\n \"\"\"Dirichlet distribution for continuous actions that are between\n [0,1] and sum to 1.\n\n e.g. actions that represent resource allocation.\"\"\"\n\n def __init__(self, inputs):\n \"\"\"Input is a tensor of logits. The exponential of logits is used to\n parametrize the Dirichlet distribution as all parameters need to be\n positive. An arbitrary small epsilon is added to the concentration\n parameters to be zero due to numerical error.\n\n See issue #4440 for more details.\n \"\"\"\n self.epsilon = 1e-7\n concentration = tf.exp(inputs) + self.epsilon\n self.dist = tf.distributions.Dirichlet(\n concentration=concentration,\n validate_args=True,\n allow_nan_stats=False,\n )\n ActionDistribution.__init__(self, concentration)\n\n @override(ActionDistribution)\n def logp(self, x):\n # Support of Dirichlet are positive real numbers. x is already be\n # an array of positive number, but we clip to avoid zeros due to\n # numerical errors.\n x = tf.maximum(x, self.epsilon)\n x = x / tf.reduce_sum(x, axis=-1, keepdims=True)\n return self.dist.log_prob(x)\n\n @override(ActionDistribution)\n def entropy(self):\n return self.dist.entropy()\n\n @override(ActionDistribution)\n def kl(self, other):\n return self.dist.kl_divergence(other.dist)\n\n @override(ActionDistribution)\n def _build_sample_op(self):\n return self.dist.sample()\n"
] | [
[
"numpy.log",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
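The second row's `action_dist.py` computes the categorical entropy and KL divergence directly from logits, subtracting the per-row maximum for numerical stability. A small NumPy check of the same entropy formula, with made-up logits and independent of TensorFlow, is sketched below purely for illustration.

```python
import numpy as np

# Made-up logits for a single state over three discrete actions.
logits = np.array([[2.0, 1.0, 0.1]])

a0 = logits - logits.max(axis=1, keepdims=True)   # subtract max for stability
ea0 = np.exp(a0)
z0 = ea0.sum(axis=1, keepdims=True)
p0 = ea0 / z0                                     # softmax probabilities
entropy = (p0 * (np.log(z0) - a0)).sum(axis=1)    # same form as Categorical.entropy()

print(entropy)  # entropy of softmax(logits), in nats
```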
d-ataman/Char-NMT | [
"6305959d11d7a2efd68b05d3bf867f8193b902de"
] | [
"onmt/Models.py"
] | [
"from __future__ import division\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nfrom torch.autograd import Variable\nfrom torch.nn.utils.rnn import pack_padded_sequence as pack\nfrom torch.nn.utils.rnn import pad_packed_sequence as unpack\n\nimport numpy as np\n\nimport onmt\nfrom onmt.Utils import aeq\n#from onmt.modules.Samplers import DiagonalGaussianSampler\n\n\ndef rnn_factory(rnn_type, **kwargs):\n # Use pytorch version when available.\n no_pack_padded_seq = False\n if rnn_type == \"SRU\":\n # SRU doesn't support PackedSequence.\n no_pack_padded_seq = True\n rnn = onmt.modules.SRU(**kwargs)\n else:\n rnn = getattr(nn, rnn_type)(**kwargs)\n return rnn, no_pack_padded_seq\n\n\nclass EncoderBase(nn.Module):\n \"\"\"\n Base encoder class. Specifies the interface used by different encoder types\n and required by :obj:`onmt.Models.NMTModel`.\n\n .. mermaid::\n\n graph BT\n A[Input]\n subgraph RNN\n C[Pos 1]\n D[Pos 2]\n E[Pos N]\n end\n F[Memory_Bank]\n G[Final]\n A-->C\n A-->D\n A-->E\n C-->F\n D-->F\n E-->F\n E-->G\n \"\"\"\n def _check_args(self, input, lengths=None, hidden=None):\n s_len, n_batch, n_feats = input.size()\n if lengths is not None:\n n_batch_, = lengths.size()\n aeq(n_batch, n_batch_)\n\n def forward(self, src, lengths=None, encoder_state=None):\n \"\"\"\n Args:\n src (:obj:`LongTensor`):\n padded sequences of sparse indices `[src_len x batch x nfeat]`\n lengths (:obj:`LongTensor`): length of each sequence `[batch]`\n encoder_state (rnn-class specific):\n initial encoder_state state.\n\n Returns:\n (tuple of :obj:`FloatTensor`, :obj:`FloatTensor`):\n * final encoder state, used to initialize decoder\n * memory bank for attention, `[src_len x batch x hidden]`\n \"\"\"\n raise NotImplementedError\n\n\nclass MeanEncoder(EncoderBase):\n \"\"\"A trivial non-recurrent encoder. 
Simply applies mean pooling.\n\n Args:\n num_layers (int): number of replicated layers\n embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use\n \"\"\"\n def __init__(self, num_layers, embeddings):\n super(MeanEncoder, self).__init__()\n self.num_layers = num_layers\n self.embeddings = embeddings\n\n def forward(self, src, lengths=None, encoder_state=None):\n \"See :obj:`EncoderBase.forward()`\"\n self._check_args(src, lengths, encoder_state)\n\n emb = self.embeddings(src)\n s_len, batch, emb_dim = emb.size()\n mean = emb.mean(0).expand(self.num_layers, batch, emb_dim)\n memory_bank = emb\n encoder_final = (mean, mean)\n return encoder_final, memory_bank\n\n\nclass RNNEncoder(EncoderBase):\n \"\"\" A generic recurrent neural network encoder.\n\n Args:\n rnn_type (:obj:`str`):\n style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]\n bidirectional (bool) : use a bidirectional RNN\n num_layers (int) : number of stacked layers\n hidden_size (int) : hidden size of each layer\n dropout (float) : dropout value for :obj:`nn.Dropout`\n embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use\n \"\"\"\n def __init__(self, rnn_type, bidirectional, num_layers,\n hidden_size, dropout=0.0, embeddings=None,\n use_bridge=False):\n super(RNNEncoder, self).__init__()\n assert embeddings is not None\n\n num_directions = 2 if bidirectional else 1\n assert hidden_size % num_directions == 0\n hidden_size = hidden_size // num_directions\n self.embeddings = embeddings\n\n self.rnn, self.no_pack_padded_seq = \\\n rnn_factory(rnn_type,\n input_size=embeddings.embedding_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=dropout,\n bidirectional=bidirectional)\n\n # Initialize the bridge layer\n self.use_bridge = use_bridge\n if self.use_bridge:\n self._initialize_bridge(rnn_type,\n hidden_size,\n num_layers)\n\n def forward(self, src, lengths=None, encoder_state=None):\n \"See :obj:`EncoderBase.forward()`\"\n self._check_args(src, lengths, encoder_state)\n\n emb = self.embeddings(src)\n s_len, batch, emb_dim = emb.size()\n\n packed_emb = emb\n if lengths is not None and not self.no_pack_padded_seq:\n # Lengths data is wrapped inside a Variable.\n lengths = lengths.view(-1).tolist()\n packed_emb = pack(emb, lengths)\n\n memory_bank, encoder_final = self.rnn(packed_emb, encoder_state)\n\n if lengths is not None and not self.no_pack_padded_seq:\n memory_bank = unpack(memory_bank)[0]\n\n if self.use_bridge:\n encoder_final = self._bridge(encoder_final)\n return encoder_final, memory_bank\n\n def _initialize_bridge(self, rnn_type,\n hidden_size,\n num_layers):\n\n # LSTM has hidden and cell state, other only one\n number_of_states = 2 if rnn_type == \"LSTM\" else 1\n # Total number of states\n self.total_hidden_dim = hidden_size * num_layers\n\n # Build a linear layer for each\n self.bridge = nn.ModuleList([nn.Linear(self.total_hidden_dim,\n self.total_hidden_dim,\n bias=True)\n for i in range(number_of_states)])\n\n def _bridge(self, hidden):\n \"\"\"\n Forward hidden state through bridge\n \"\"\"\n def bottle_hidden(linear, states):\n \"\"\"\n Transform from 3D to 2D, apply linear and return initial size\n \"\"\"\n size = states.size()\n result = linear(states.view(-1, self.total_hidden_dim))\n return F.relu(result).view(size)\n\n if isinstance(hidden, tuple): # LSTM\n outs = tuple([bottle_hidden(layer, hidden[ix])\n for ix, layer in enumerate(self.bridge)])\n else:\n outs = bottle_hidden(self.bridge[0], hidden)\n return outs\n\n\nclass RNNDecoderBase(nn.Module):\n 
\"\"\"\n Base recurrent attention-based decoder class.\n Specifies the interface used by different decoder types\n and required by :obj:`onmt.Models.NMTModel`.\n\n\n .. mermaid::\n\n graph BT\n A[Input]\n subgraph RNN\n C[Pos 1]\n D[Pos 2]\n E[Pos N]\n end\n G[Decoder State]\n H[Decoder State]\n I[Outputs]\n F[Memory_Bank]\n A--emb-->C\n A--emb-->D\n A--emb-->E\n H-->C\n C-- attn --- F\n D-- attn --- F\n E-- attn --- F\n C-->I\n D-->I\n E-->I\n E-->G\n F---I\n\n Args:\n rnn_type (:obj:`str`):\n style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]\n bidirectional_encoder (bool) : use with a bidirectional encoder\n num_layers (int) : number of stacked layers\n hidden_size (int) : hidden size of each layer\n attn_type (str) : see :obj:`onmt.modules.GlobalAttention`\n coverage_attn (str): see :obj:`onmt.modules.GlobalAttention`\n context_gate (str): see :obj:`onmt.modules.ContextGate`\n copy_attn (bool): setup a separate copy attention mechanism\n dropout (float) : dropout value for :obj:`nn.Dropout`\n embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use\n \"\"\"\n def __init__(self, rnn_type, bidirectional_encoder, num_layers,\n hidden_size, attn_type=\"general\",\n coverage_attn=False, context_gate=None,\n copy_attn=False, dropout=0.0, embeddings=None,\n reuse_copy_attn=False):\n super(RNNDecoderBase, self).__init__()\n\n # Basic attributes.\n self.decoder_type = 'rnn'\n self.bidirectional_encoder = bidirectional_encoder\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.embeddings = embeddings\n self.dropout = nn.Dropout(dropout)\n\n # Build the RNN.\n self.rnn = self._build_rnn(rnn_type,\n input_size=self._input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=dropout)\n\n # Set up the context gate.\n self.context_gate = None\n if context_gate is not None:\n self.context_gate = onmt.modules.context_gate_factory(\n context_gate, self._input_size,\n hidden_size, hidden_size, hidden_size\n )\n\n # Set up the standard attention.\n self._coverage = coverage_attn\n self.attn = onmt.modules.GlobalAttention(\n hidden_size, coverage=coverage_attn,\n attn_type=attn_type, dropout=dropout\n )\n\n # Set up a separated copy attention layer, if needed.\n self._copy = False\n if copy_attn and not reuse_copy_attn:\n self.copy_attn = onmt.modules.GlobalAttention(\n hidden_size, attn_type=attn_type\n )\n if copy_attn:\n self._copy = True\n self._reuse_copy_attn = reuse_copy_attn\n\n def forward(self, tgt, memory_bank, state, memory_lengths=None):\n \"\"\"\n Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.Models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.\n \"\"\"\n # Check\n assert isinstance(state, RNNDecoderState)\n tgt_len, tgt_batch, _ = tgt.size()\n _, memory_batch, _ = memory_bank.size()\n aeq(tgt_batch, memory_batch)\n # END\n\n # Run the forward pass of the RNN.\n decoder_outputs, attns, decoder_final = self._run_forward_pass(\n tgt, memory_bank, state, memory_lengths=memory_lengths)\n\n # Update the state 
with the result.\n final_output = decoder_outputs[-1]\n coverage = None\n if \"coverage\" in attns:\n coverage = attns[\"coverage\"][-1].unsqueeze(0)\n state.update_state(decoder_final, final_output.unsqueeze(0), coverage)\n\n # Concatenates sequence of tensors along a new dimension.\n decoder_outputs = torch.stack(decoder_outputs)\n for k in attns:\n attns[k] = torch.stack(attns[k])\n\n return decoder_outputs, attns, state\n\n def init_decoder_state(self, src, memory_bank, encoder_final):\n def _fix_enc_hidden(h):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h\n\n if isinstance(encoder_final, tuple): # LSTM\n return RNNDecoderState(self.hidden_size,\n tuple([_fix_enc_hidden(enc_hid)\n for enc_hid in encoder_final]))\n else: # GRU\n return RNNDecoderState(self.hidden_size,\n _fix_enc_hidden(encoder_final))\n\n\nclass StdRNNDecoder(RNNDecoderBase):\n \"\"\"\n Standard fully batched RNN decoder with attention.\n Faster implementation, uses CuDNN for implementation.\n See :obj:`RNNDecoderBase` for options.\n\n\n Based around the approach from\n \"Neural Machine Translation By Jointly Learning To Align and Translate\"\n :cite:`Bahdanau2015`\n\n\n Implemented without input_feeding and currently with no `coverage_attn`\n or `copy_attn` support.\n \"\"\"\n def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):\n \"\"\"\n Private helper for running the specific RNN forward pass.\n Must be overriden by all subclasses.\n Args:\n tgt (LongTensor): a sequence of input tokens tensors\n [len x batch x nfeats].\n memory_bank (FloatTensor): output(tensor sequence) from the encoder\n RNN of size (src_len x batch x hidden_size).\n state (FloatTensor): hidden state from the encoder RNN for\n initializing the decoder.\n memory_lengths (LongTensor): the source memory_bank lengths.\n Returns:\n decoder_final (Variable): final hidden state from the decoder.\n decoder_outputs ([FloatTensor]): an array of output of every time\n step from the decoder.\n attns (dict of (str, [FloatTensor]): a dictionary of different\n type of attention Tensor array of every time\n step from the decoder.\n \"\"\"\n assert not self._copy # TODO, no support yet.\n assert not self._coverage # TODO, no support yet.\n\n # Initialize local and return variables.\n attns = {}\n emb = self.embeddings(tgt)\n\n # Run the forward pass of the RNN.\n if isinstance(self.rnn, nn.GRU):\n rnn_output, decoder_final = self.rnn(emb, state.hidden[0])\n else:\n rnn_output, decoder_final = self.rnn(emb, state.hidden)\n\n # Check\n tgt_len, tgt_batch, _ = tgt.size()\n output_len, output_batch, _ = rnn_output.size()\n aeq(tgt_len, output_len)\n aeq(tgt_batch, output_batch)\n # END\n\n # Calculate the attention.\n decoder_outputs, p_attn = self.attn(\n rnn_output.transpose(0, 1).contiguous(),\n memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths\n )\n attns[\"std\"] = p_attn\n\n # Calculate the context gate.\n if self.context_gate is not None:\n decoder_outputs = self.context_gate(\n emb.view(-1, emb.size(2)),\n rnn_output.view(-1, rnn_output.size(2)),\n decoder_outputs.view(-1, decoder_outputs.size(2))\n )\n decoder_outputs = \\\n decoder_outputs.view(tgt_len, tgt_batch, self.hidden_size)\n\n decoder_outputs = self.dropout(decoder_outputs)\n return decoder_outputs, attns, decoder_final\n\n def _build_rnn(self, rnn_type, **kwargs):\n rnn, _ = 
rnn_factory(rnn_type, **kwargs)\n return rnn\n\n @property\n def _input_size(self):\n \"\"\"\n Private helper returning the number of expected features.\n \"\"\"\n return self.embeddings.embedding_size\n\n\nclass InputFeedRNNDecoder(RNNDecoderBase):\n \"\"\"\n Input feeding based decoder. See :obj:`RNNDecoderBase` for options.\n\n Based around the input feeding approach from\n \"Effective Approaches to Attention-based Neural Machine Translation\"\n :cite:`Luong2015`\n\n\n .. mermaid::\n\n graph BT\n A[Input n-1]\n AB[Input n]\n subgraph RNN\n E[Pos n-1]\n F[Pos n]\n E --> F\n end\n G[Encoder]\n H[Memory_Bank n-1]\n A --> E\n AB --> F\n E --> H\n G --> H\n \"\"\"\n\n def _run_forward_pass(self, tgt, memory_bank, state, memory_lengths=None):\n \"\"\"\n See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n \"\"\"\n # Additional args check.\n input_feed = state.input_feed.squeeze(0)\n input_feed_batch, _ = input_feed.size()\n tgt_len, tgt_batch, _ = tgt.size()\n aeq(tgt_batch, input_feed_batch)\n # END Additional args check.\n\n # Initialize local and return variables.\n decoder_outputs = []\n attns = {\"std\": []}\n if self._copy:\n attns[\"copy\"] = []\n if self._coverage:\n attns[\"coverage\"] = []\n\n emb = self.embeddings(tgt)\n assert emb.dim() == 3 # len x batch x embedding_dim\n\n hidden = state.hidden\n coverage = state.coverage.squeeze(0) \\\n if state.coverage is not None else None\n\n # Input feed concatenates hidden state with\n # input at every time step.\n for i, emb_t in enumerate(emb.split(1)):\n emb_t = emb_t.squeeze(0)\n decoder_input = torch.cat([emb_t, input_feed], 1)\n\n rnn_output, hidden = self.rnn(decoder_input, hidden)\n decoder_output, p_attn = self.attn(\n rnn_output, memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths)\n if self.context_gate is not None:\n # TODO: context gate should be employed\n # instead of second RNN transform.\n decoder_output = self.context_gate(\n decoder_input, rnn_output, decoder_output\n )\n decoder_output = self.dropout(decoder_output)\n input_feed = decoder_output\n\n decoder_outputs += [decoder_output]\n attns[\"std\"] += [p_attn]\n\n # Update the coverage attention.\n if self._coverage:\n coverage = coverage + p_attn \\\n if coverage is not None else p_attn\n attns[\"coverage\"] += [coverage]\n\n # Run the forward pass of the copy attention layer.\n if self._copy and not self._reuse_copy_attn:\n _, copy_attn = self.copy_attn(decoder_output,\n memory_bank.transpose(0, 1))\n attns[\"copy\"] += [copy_attn]\n elif self._copy:\n attns[\"copy\"] = attns[\"std\"]\n # Return result.\n return decoder_outputs, attns, hidden\n\n def _build_rnn(self, rnn_type, input_size,\n hidden_size, num_layers, dropout):\n assert not rnn_type == \"SRU\", \"SRU doesn't support input feed! \" \\\n \"Please set -input_feed 0!\"\n if rnn_type == \"LSTM\":\n stacked_cell = onmt.modules.StackedLSTM\n else:\n stacked_cell = onmt.modules.StackedGRU\n return stacked_cell(num_layers, input_size,\n hidden_size, dropout)\n\n @property\n def _input_size(self):\n \"\"\"\n Using input feed by concatenating input with attention vectors.\n \"\"\"\n return self.embeddings.embedding_size + self.hidden_size\n\n\nclass NMTModel(nn.Module):\n \"\"\"\n Core trainable object in OpenNMT. 
Implements a trainable interface\n for a simple, generic encoder + decoder model.\n\n Args:\n encoder (:obj:`EncoderBase`): an encoder object\n decoder (:obj:`RNNDecoderBase`): a decoder object\n multi<gpu (bool): setup for multigpu support\n \"\"\"\n def __init__(self, encoder, decoder, multigpu=False):\n self.multigpu = multigpu\n super(NMTModel, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n\n def forward(self, src, tgt, lengths, dec_state=None):\n \"\"\"Forward propagate a `src` and `tgt` pair for training.\n Possible initialized with a beginning decoder state.\n\n Args:\n src (:obj:`Tensor`):\n a source sequence passed to encoder.\n typically for inputs this will be a padded :obj:`LongTensor`\n of size `[len x batch x features]`. however, may be an\n image or other generic input depending on encoder.\n tgt (:obj:`LongTensor`):\n a target sequence of size `[tgt_len x batch]`.\n lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.\n dec_state (:obj:`DecoderState`, optional): initial decoder state\n Returns:\n (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):\n\n * decoder output `[tgt_len x batch x hidden]`\n * dictionary attention dists of `[tgt_len x batch x src_len]`\n * final decoder state\n \"\"\"\n tgt = tgt[:-1] # exclude last target from inputs\n\n enc_final, memory_bank = self.encoder(src, lengths)\n enc_state = \\\n self.decoder.init_decoder_state(src, memory_bank, enc_final)\n decoder_outputs, attns, dec_state = \\\n self.decoder(tgt, memory_bank,\n enc_state if dec_state is None\n else dec_state,\n memory_lengths=lengths)\n if self.multigpu:\n # Not yet supported on multi-gpu\n dec_state = None\n attns = None\n return decoder_outputs, attns, dec_state\n\n\nclass DecoderState(object):\n \"\"\"Interface for grouping together the current state of a recurrent\n decoder. In the simplest case just represents the hidden state of\n the model. But can also be used for implementing various forms of\n input_feeding and non-recurrent models.\n\n Modules need to implement this to utilize beam search decoding.\n \"\"\"\n def detach(self):\n for h in self._all:\n if h is not None:\n h.detach()\n\n def beam_update(self, idx, positions, beam_size):\n for e in self._all:\n a, br, d = e.size()\n sent_states = e.view(a, beam_size, br // beam_size, d)[:, :, idx]\n sent_states.data.copy_(\n sent_states.data.index_select(1, positions))\n\n\nclass RNNDecoderState(DecoderState):\n def __init__(self, hidden_size, rnnstate=None):\n \"\"\"\n Args:\n hidden_size (int): the size of hidden layer of the decoder.\n rnnstate: final hidden state from the encoder.\n transformed to shape: layers x batch x (directions*dim).\n \"\"\"\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n batch_size = self.hidden[0].size(1)\n \n self.coverage = None\n\n # Init the input feed.\n h_size = (batch_size, hidden_size)\n self.input_feed = Variable(self.hidden[0].data.new(*h_size).zero_(),\n requires_grad=False).unsqueeze(0)\n\n @property\n def _all(self):\n return self.hidden + (self.input_feed,)\n\n def update_state(self, rnnstate, input_feed, coverage): \n # rnnstate is a list of num_layers elements where each state corresponds to one layer\n\n if not isinstance(rnnstate, tuple):\n self.hidden = (rnnstate,)\n else:\n self.hidden = rnnstate\n self.input_feed = input_feed\n self.coverage = coverage\n\n def repeat_beam_size_times(self, beam_size):\n \"\"\" Repeat beam_size times along batch dimension. 
\"\"\"\n vars = [Variable(e.data.repeat(1, beam_size, 1), volatile=True)\n for e in self._all]\n self.hidden = tuple(vars[:-1])\n self.input_feed = vars[-1]\n\nclass RNNTrigramsEncoder(RNNEncoder):\n\n def __init__(self, rnn_type, bidirectional, num_layers,\n hidden_size, dropout=0.0, embeddings=None,\n use_bridge=False):\n super(RNNTrigramsEncoder, self).__init__\\\n (rnn_type, bidirectional, num_layers,\n hidden_size, dropout, embeddings,\n use_bridge)\n self.rnn_trigram, self.no_pack_padded_seq = \\\n rnn_factory(rnn_type,\n input_size=embeddings.embedding_size,\n hidden_size=embeddings.embedding_size//2,\n num_layers=1,\n dropout=dropout,\n bidirectional=bidirectional)\n self.combine_states = nn.Linear(embeddings.embedding_size, embeddings.embedding_size)\n self.rnn_size = embeddings.embedding_size\n self.dropout = nn.Dropout(dropout)\n\n def _check_args(self, input, lengths=None, hidden=None):\n n_chars, n_batch, n_feats = input.size()\n if lengths is not None:\n n_batch_, = lengths[0].size()\n aeq(n_batch, n_batch_)\n\n def forward(self, src, batch_size, lengths=None, encoder_state=None):\n \"See :obj:`EncoderBase.forward()`\"\n self._check_args(src, lengths, encoder_state)\n\n emb = self.embeddings(src)\n trigrams_len, batch_seqlen, emb_dim = emb.size()\n packed_emb = emb\n if lengths is not None: #and not self.no_pack_padded_seq:\n # # Lengths data is wrapped inside a Variable.\n lengs = lengths[0].view(-1).data.tolist()\n seq_lens = lengths[1].view(-1).tolist()\n # packed_emb = pack(emb, lengths)\n\n words_memory_bank, trigram_final = self.rnn_trigram(packed_emb)\n back_state = words_memory_bank[0, :, self.rnn_size//2:]\n forw_state = []\n #for i in range(words_memory_bank.size(1)):\n # forw_state += [words_memory_bank[lengs[i]-1, i, :self.rnn_size//2].view(1, self.rnn_size//2)]\n forw_state = words_memory_bank[-1, :, :self.rnn_size//2]\n #forw_state = torch.cat(forw_state)\n word_embs = self.combine_states(torch.cat([back_state, forw_state], dim=-1))\n word_embs = self.dropout(word_embs)\n word_embs = torch.cat(word_embs.unsqueeze(1).split(batch_seqlen // batch_size, dim=0), dim=1)\n word_embs = pack(word_embs, seq_lens)\n memory_bank, encoder_final = self.rnn(word_embs, encoder_state)\n\n if lengths is not None and not self.no_pack_padded_seq:\n memory_bank = unpack(memory_bank)[0]\n if self.use_bridge:\n encoder_final = self._bridge(self.dropout(encoder_final))\n return encoder_final, memory_bank\n\n\nclass RNNWordDecoderBase(nn.Module):\n \"\"\"\n Base recurrent attention-based decoder class for word decoding.\n Specifies the interface used by different decoder types\n and required by :obj:`onmt.Models.TargetCharNMTModel`.\n \"\"\"\n\n def __init__(self, rnn_type, bidirectional_encoder, num_layers,\n hidden_size, attn_type=\"general\",\n coverage_attn=False, context_gate=None,\n copy_attn=False, dropout=0.0, embeddings=None,\n reuse_copy_attn=False):\n super(RNNWordDecoderBase, self).__init__()\n\n # Basic attributes.\n self.decoder_type = 'rnn'\n self.bidirectional_encoder = bidirectional_encoder\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.embeddings = embeddings\n self.dropout = nn.Dropout(dropout)\n\n # Build the RNN.\n self.rnn = self._build_rnn(rnn_type,\n input_size=self._input_size,\n hidden_size=hidden_size,\n num_layers=num_layers,\n dropout=dropout)\n\n # Set up the context gate.\n self.context_gate = None\n if context_gate is not None:\n self.context_gate = onmt.modules.context_gate_factory(\n context_gate, self._input_size,\n hidden_size, 
hidden_size, hidden_size\n )\n\n # Set up the standard attention.\n self._coverage = coverage_attn\n self.attn = onmt.modules.GlobalAttention(\n hidden_size, coverage=coverage_attn,\n attn_type=attn_type\n )\n\n # Set up a separated copy attention layer, if needed.\n self._copy = False\n if copy_attn and not reuse_copy_attn:\n self.copy_attn = onmt.modules.GlobalAttention(\n hidden_size, attn_type=attn_type\n )\n if copy_attn:\n self._copy = True\n self._reuse_copy_attn = reuse_copy_attn\n\n def forward(self, tgt, memory_bank, batch_size, state, memory_lengths=None):\n \"\"\"\n Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n memory_bank (`FloatTensor`): vectors from the encoder\n `[src_len x batch x hidden]`.\n state (:obj:`onmt.Models.DecoderState`):\n decoder state object to initialize the decoder\n memory_lengths (`LongTensor`): the padded source lengths\n `[batch]`.\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_state: final hidden state from the decoder\n * attns: distribution over src at each tgt\n `[tgt_len x batch x src_len]`.\n \"\"\"\n # Check\n assert isinstance(state, RNNDecoderState)\n # END\n\n # Run the forward pass of the RNN.\n attns, outputs, hidden, ctx = self._run_forward_pass(\n tgt, memory_bank, batch_size, state, memory_lengths=memory_lengths)\n\n # Update the state with the result.\n coverage = None\n if \"coverage\" in attns:\n coverage = attns[\"coverage\"][-1].unsqueeze(0)\n state.update_state(hidden, ctx, coverage)\n\n\n for k in attns:\n attns[k] = torch.stack(attns[k])\n\n return attns, outputs, state\n\n def init_decoder_state(self, src, memory_bank, encoder_final):\n def _fix_enc_hidden(h):\n # The encoder hidden is (layers*directions) x batch x dim.\n # We need to convert it to layers x batch x (directions*dim).\n if self.bidirectional_encoder:\n h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)\n return h\n\n if isinstance(encoder_final, tuple): # LSTM\n return RNNDecoderState(self.hidden_size,\n tuple([_fix_enc_hidden(enc_hid)\n for enc_hid in encoder_final]))\n else: # GRU\n return RNNDecoderState(self.hidden_size,\n _fix_enc_hidden(encoder_final))\n\n\n\nclass RNNCharDecoderBase(nn.Module):\n \"\"\"\n Base recurrent attention-based decoder class for character decoding.\n Specifies the interface used by different decoder types\n and required by :obj:`onmt.Models.TargetCharNMTModel`.\n\n\n Args:\n rnn_type (:obj:`str`):\n style of recurrent unit to use, one of [RNN, LSTM, GRU, SRU]\n bidirectional_encoder (bool) : use with a bidirectional encoder\n num_layers (int) : number of stacked layers\n hidden_size (int) : hidden size of each layer\n attn_type (str) : see :obj:`onmt.modules.GlobalAttention`\n coverage_attn (str): see :obj:`onmt.modules.GlobalAttention`\n context_gate (str): see :obj:`onmt.modules.ContextGate`\n copy_attn (bool): setup a separate copy attention mechanism\n dropout (float) : dropout value for :obj:`nn.Dropout`\n embeddings (:obj:`onmt.modules.Embeddings`): embedding module to use\n \"\"\"\n\n def __init__(self, rnn_type, bidirectional_encoder, num_layers,\n hidden_size, attn_type=\"general\",\n coverage_attn=False, context_gate=None,\n copy_attn=False, dropout=0.0, embeddings=None,\n reuse_copy_attn=False):\n super(RNNCharDecoderBase, self).__init__()\n\n # Basic attributes.\n self.decoder_type = 'rnn'\n self.bidirectional_encoder = bidirectional_encoder\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.embeddings = 
embeddings\n self.dropout = nn.Dropout(dropout)\n\n # Build the RNN.\n self.char_decoder = self._build_rnn(rnn_type,\n input_size=hidden_size,\n hidden_size=hidden_size,\n num_layers=1,\n dropout=dropout)\n\n\n def forward(self, tgt, batch_size, hiddens, char_state, translate=False):\n \"\"\"\n Args:\n tgt (`LongTensor`): sequences of padded tokens\n `[tgt_len x batch x nfeats]`.\n state (:obj:`onmt.Models.DecoderState`):\n decoder state object to initialize the decoder\n Returns:\n (`FloatTensor`,:obj:`onmt.Models.DecoderState`,`FloatTensor`):\n * decoder_outputs: output from the decoder (after attn)\n `[tgt_len x batch x hidden]`.\n * decoder_state: final hidden state from the char decoder\n \"\"\"\n\n # Check\n assert isinstance(char_state, RNNDecoderState)\n # END\n\n # Run the forward pass of the RNN.\n decoder_outputs, char_hidden = self._run_forward_pass(\n tgt, batch_size, hiddens, char_state, translate)\n\n # Update the state with the result.\n char_state.hidden = char_hidden\n\n # Concatenates sequence of tensors along a new dimension.\n decoder_outputs = torch.stack(decoder_outputs)\n\n return decoder_outputs, char_state\n\n def init_decoder_state(self, hidden, embedding_size):\n\n #char_hidden = self.tanh(self.word2char(hidden))\n char_hidden = hidden\n if isinstance(char_hidden[0], tuple): # LSTM\n return RNNDecoderState(embedding_size, tuple([char_hidden]))\n else: # GRU\n return RNNDecoderState(embedding_size, char_hidden)\n\n\n\nclass StdCharRNNDecoder(RNNCharDecoderBase):\n \"\"\"\n Input feeding based decoder. See :obj:`RNNDecoderBase` for options.\n\n Based around the input feeding approach from\n \"Effective Approaches to Attention-based Neural Machine Translation\"\n :cite:`Luong2015`\n \"\"\"\n def __init__(self, rnn_type, bidirectional_encoder, num_layers,\n hidden_size, attn_type=\"general\",\n coverage_attn=False, context_gate=None,\n copy_attn=False, dropout=0.0, embeddings=None,\n reuse_copy_attn=False):\n super(StdCharRNNDecoder, self)\\\n .__init__(rnn_type, bidirectional_encoder, num_layers,\n hidden_size, attn_type, coverage_attn,\n context_gate, copy_attn, dropout, embeddings,\n reuse_copy_attn)\n\n\n def _run_forward_pass(self, tgt, batch_size, hiddens, char_state, translate=False):\n\n # Initialize local and return variables.\n decoder_outputs = []\n\n chars_len, batch_seqlen, _ = tgt.size()\n seqlen = batch_seqlen // batch_size\n \n char_embs = self.embeddings(tgt)\n\n for i, hidden_w in enumerate(hiddens):\n # Initialize the char LSTM input feed and the state at the beginning of each word.\n if translate == False:\n char_s = (hidden_w.unsqueeze(0),)\n else:\n char_s = char_state.hidden\n\n for j in range(chars_len):\n charrnn_input = char_embs[j,i*batch_size:(i+1)*batch_size,:]\n charrnn_output, char_s = self.char_decoder(charrnn_input, char_s)\n charrnn_output = charrnn_output + charrnn_input\n charrnn_output = self.dropout(charrnn_output)\n decoder_outputs += [charrnn_output]\n\n return decoder_outputs, char_s\n\n def _build_rnn(self, rnn_type, input_size,\n hidden_size, num_layers, dropout):\n assert not rnn_type == \"SRU\", \"SRU doesn't support input feed! 
\" \\\n \"Please set -input_feed 0!\"\n if rnn_type == \"LSTM\":\n stacked_cell = onmt.modules.StackedLSTM\n else:\n stacked_cell = onmt.modules.StackedGRU\n return stacked_cell(num_layers, input_size,\n hidden_size, dropout)\n\n @property\n def _input_size(self):\n \"\"\"\n Using input feed by concatenating input with attention vectors.\n \"\"\"\n return self.embeddings.embedding_size + self.hidden_size\n\n\n\nclass StdWordRNNDecoder(RNNWordDecoderBase):\n \"\"\"\n Input feeding based decoder. See :obj:`RNNDecoderBase` for options.\n\n Based around the input feeding approach from\n \"Effective Approaches to Attention-based Neural Machine Translation\"\n :cite:`Luong2015`\n \"\"\"\n\n def __init__(self, rnn_type, bidirectional_encoder, num_layers,\n hidden_size, attn_type=\"general\",\n coverage_attn=False, context_gate=None,\n copy_attn=False, dropout=0.0, embeddings=None,\n reuse_copy_attn=False):\n super(StdWordRNNDecoder, self)\\\n .__init__(rnn_type, bidirectional_encoder, num_layers,\n hidden_size, attn_type, coverage_attn,\n context_gate, copy_attn, dropout, embeddings,\n reuse_copy_attn)\n self.char_rnn, _ = \\\n rnn_factory(rnn_type,\n input_size=embeddings.embedding_size,\n hidden_size=embeddings.embedding_size//2,\n num_layers=1,\n dropout=dropout,\n bidirectional=True)\n self.combine_states = nn.Linear(embeddings.embedding_size, embeddings.embedding_size)\n self.rnn_size = embeddings.embedding_size\n self.dropout = nn.Dropout(p=dropout)\n self.word2char = nn.Linear(self.rnn_size*3, self.rnn_size, bias=True)\n self.tanh = nn.Tanh()\n\n def _run_forward_pass(self, tgt, memory_bank, batch_size, state, memory_lengths=None):\n \"\"\"\n See StdRNNDecoder._run_forward_pass() for description\n of arguments and return values.\n \"\"\"\n\n # Additional args check.\n input_feed = state.input_feed #.squeeze(0)\n\n # Initialize local and return variables.\n outputs = []\n attns = {\"std\": []}\n if self._copy:\n attns[\"copy\"] = []\n if self._coverage:\n attns[\"coverage\"] = []\n\n # Predict word representations with the bi-directional char-rnn\n char_embs = self.embeddings(tgt)\n assert char_embs.dim() == 3 # len x batch x embedding_dim\n chars_len, batch_seqlen, emb_dim = char_embs.size()\n \n words_memory_bank, final_char = self.char_rnn(char_embs)\n words_memory_bank = words_memory_bank + char_embs # resid connection\n\n back_state = words_memory_bank[0, :, self.rnn_size//2:]\n forw_state = []\n forw_state = words_memory_bank[-1, :, :self.rnn_size//2]\n word_embs = self.combine_states(torch.cat([back_state, forw_state], dim=-1))\n word_embs = torch.cat(word_embs.unsqueeze(1).split(batch_seqlen // batch_size, dim=0), dim=1) # n_words x batch_size x dim\n\n word_embs = self.dropout(word_embs)\n\n tgt_len, tgt_batch, _ = word_embs.size()\n\n hidden = state.hidden\n coverage = state.coverage.squeeze(0) \\\n if state.coverage is not None else None\n\n # Input feed concatenates hidden state with\n # input at every time step.\n for i, emb_t in enumerate(word_embs.split(1)):\n\n if emb_t.dim() > 2:\n emb_t = emb_t.squeeze(0)\n if input_feed.dim() > 2:\n input_feed = input_feed.squeeze(0)\n\n # Get the word prediction with the word-level RNN.\n wordrnn_input = torch.cat([emb_t, input_feed], 1)\n wordrnn_output, hidden = self.rnn(wordrnn_input, hidden)\n wordrnn_output = wordrnn_output + emb_t # resid connection\n\n # Get the predicted word using the attention.\n attn_out, p_attn, ctx = self.attn(\n wordrnn_output,\n memory_bank.transpose(0, 1),\n memory_lengths=memory_lengths)\n if 
self.context_gate is not None:\n # TODO: context gate should be employed\n # instead of second RNN transform.\n word_pred = self.context_gate(\n wordrnn_input, wordrnn_output, word_pred\n )\n\n ctx = self.dropout(ctx)\n input_feed = ctx\n\n word_out = self.dropout(attn_out) \n\n outputs += [word_out]\n attns[\"std\"] += [p_attn]\n\n # Update the coverage attention.\n if self._coverage:\n coverage = coverage + p_attn \\\n if coverage is not None else p_attn\n attns[\"coverage\"] += [coverage]\n\n # Run the forward pass of the copy attention layer.\n if self._copy and not self._reuse_copy_attn:\n _, copy_attn = self.copy_attn(word_pred,\n memory_bank.transpose(0, 1))\n attns[\"copy\"] += [copy_attn]\n elif self._copy:\n attns[\"copy\"] = attns[\"std\"]\n\n # Return result.\n return attns, outputs, hidden, ctx\n\n def _build_rnn(self, rnn_type, input_size,\n hidden_size, num_layers, dropout):\n assert not rnn_type == \"SRU\", \"SRU doesn't support input feed! \" \\\n \"Please set -input_feed 0!\"\n if rnn_type == \"LSTM\":\n stacked_cell = onmt.modules.StackedLSTM\n else:\n stacked_cell = onmt.modules.StackedGRU\n return stacked_cell(num_layers, input_size,\n hidden_size, dropout)\n\n @property\n def _input_size(self):\n \"\"\"\n Using input feed by concatenating input with attention vectors.\n \"\"\"\n return self.embeddings.embedding_size + self.hidden_size\n\nclass NMTSourceTrigramModel(nn.Module):\n \"\"\"\n Core trainable object in OpenNMT. Implements a trainable interface\n for a simple, generic encoder + decoder model.\n\n Args:\n encoder (:obj:`EncoderBase`): an encoder object\n decoder (:obj:`RNNDecoderBase`): a decoder object\n multi<gpu (bool): setup for multigpu support\n \"\"\"\n def __init__(self, encoder, decoder, multigpu=False):\n self.multigpu = multigpu\n super(NMTSourceTrigramModel, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.decoder.bidirectional_encoder = True\n\n def forward(self, src, tgt, lengths, batch_size, dec_state=None):\n \"\"\"Forward propagate a `src` and `tgt` pair for training.\n Possible initialized with a beginning decoder state.\n\n Args:\n src (:obj:`Tensor`):\n a source sequence passed to encoder.\n typically for inputs this will be a padded :obj:`LongTensor`\n of size `[len x batch x features]`. 
however, may be an\n image or other generic input depending on encoder.\n tgt (:obj:`LongTensor`):\n a target sequence of size `[tgt_len x batch]`.\n lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.\n dec_state (:obj:`DecoderState`, optional): initial decoder state\n Returns:\n (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):\n\n * decoder output `[tgt_len x batch x hidden]`\n * dictionary attention dists of `[tgt_len x batch x src_len]`\n * final decoder state\n \"\"\"\n tgt = tgt[:-1] # exclude last target from inputs\n enc_final, memory_bank = self.encoder(src, batch_size, lengths)\n enc_state = Variable(enc_final[0].data.new(enc_final[0].size()).zero_().unsqueeze(0).repeat(self.decoder.num_layers, 1, 2))\n enc_state = RNNDecoderState(self.decoder.hidden_size, enc_state)\n #self.decoder.init_decoder_state(src, memory_bank, enc_final)\n decoder_outputs, dec_state, attns = \\\n self.decoder(tgt, memory_bank,\n enc_state if dec_state is None\n else dec_state,\n memory_lengths=lengths[1])\n if self.multigpu:\n # Not yet supported on multi-gpu\n dec_state = None\n attns = None\n return decoder_outputs, attns, dec_state\n\nclass NMTTargetCharModel(nn.Module):\n \"\"\"\n Core trainable object in OpenNMT. Implements a trainable interface\n for a simple, generic encoder + decoder model.\n\n Args:\n encoder (:obj:`EncoderBase`): an encoder object\n decoder (:obj:`RNNDecoderBase`): a decoder object\n multi<gpu (bool): setup for multigpu support\n \"\"\"\n def __init__(self, encoder, decoder1, decoder2, multigpu=False):\n self.multigpu = multigpu\n super(NMTTargetCharModel, self).__init__()\n self.encoder = encoder\n self.decoder1 = decoder1\n self.decoder2 = decoder2\n self.decoder1.bidirectional_encoder = True\n\n def forward(self, src, tgt, lengths, batch_size, dec_state=None):\n \"\"\"Forward propagate a `src` and `tgt` pair for training.\n Possible initialized with a beginning decoder state.\n\n Args:\n src (:obj:`Tensor`):\n a source sequence passed to encoder.\n typically for inputs this will be a padded :obj:`LongTensor`\n of size `[len x batch x features]`. however, may be an\n image or other generic input depending on encoder.\n tgt (:obj:`LongTensor`):\n a target sequence of size `[tgt_len x batch]`.\n lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.\n dec_state (:obj:`DecoderState`, optional): initial decoder state\n Returns:\n (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):\n\n * decoder output `[tgt_len x batch x hidden]`\n * dictionary attention dists of `[tgt_len x batch x src_len]`\n * final decoder state\n \"\"\"\n\n # Exclude last target (EOS) from input sentences to feed into the word decoder\n tgt1 = torch.stack(tgt.split(tgt.size(1) // batch_size, dim=1), dim=2)\n tgt1 = tgt1[:,:-1,:,:]\n x = (tgt1 != 3).long()*tgt1 + (tgt1 == 3).long()\n tgt1 = x\n tgt1 = torch.cat(tgt1.unbind(2), dim=1)\n\n # Exclude first target (BOS) from input sentences to feed into the character decoder\n tgt2 = torch.stack(tgt.split(tgt.size(1) // batch_size, dim=1), dim=2)\n tgt2 = tgt2[:,1::,:,:]\n tgt2 = torch.cat(tgt2.unbind(2), dim=1)\n\n enc_final, memory_bank = self.encoder(src, lengths)\n \n enc_state = \\\n self.decoder1.init_decoder_state(src, memory_bank, enc_final)\n\n attns, ctxs, dec_state = \\\n self.decoder1(tgt1, memory_bank, batch_size,\n enc_state if dec_state is None\n else dec_state, \n memory_lengths=lengths)\n\n # Reorder tgt as word_1- batch_1, word1-batch_2 ... 
word_n-1-batch_n, word_n-batch,n\n newbatch = []\n seqlen = tgt2.size(1) // batch_size\n out = tgt2.split(seqlen, dim=1)\n for k in range(seqlen):\n for l in range(batch_size):\n newbatch.append(out[l][:,k,:])\n tgt2_new = torch.stack(newbatch, dim=1)\n tgt2_new = tgt2_new[:-1,:,:] #exclude the last char from the input\n tgt2_neww = (tgt2_new != 4).long()*tgt2_new + (tgt2_new == 4).long()\n\n char_state = \\\n self.decoder2.init_decoder_state((dec_state.input_feed,), self.decoder1.embeddings.embedding_size)\n\n \n decoder_outputs, char_state = \\\n self.decoder2(tgt2_neww, batch_size,\n ctxs, char_state)\n\n if self.multigpu:\n # Not yet supported on multi-gpu\n dec_state = None\n attns = None\n\n return decoder_outputs, attns, dec_state\n\nclass CharNMTModel(nn.Module):\n \"\"\"\n Core trainable object in OpenNMT. Implements a trainable interface\n for a simple, generic encoder + decoder model.\n\n Args:\n encoder (:obj:`EncoderBase`): an encoder object\n decoder (:obj:`RNNDecoderBase`): a decoder object\n multi<gpu (bool): setup for multigpu support\n \"\"\"\n def __init__(self, encoder, decoder, multigpu=False):\n self.multigpu = multigpu\n super(CharNMTModel, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.decoder.bidirectional_encoder = True\n\n def forward(self, src, tgt, lengths, batch_size, dec_state=None):\n \"\"\"Forward propagate a `src` and `tgt` pair for training.\n Possible initialized with a beginning decoder state.\n\n Args:\n src (:obj:`Tensor`):\n a source sequence passed to encoder.\n typically for inputs this will be a padded :obj:`LongTensor`\n of size `[len x batch x features]`. however, may be an\n image or other generic input depending on encoder.\n tgt (:obj:`LongTensor`):\n a target sequence of size `[tgt_len x batch]`.\n lengths(:obj:`LongTensor`): the src lengths, pre-padding `[batch]`.\n dec_state (:obj:`DecoderState`, optional): initial decoder state\n Returns:\n (:obj:`FloatTensor`, `dict`, :obj:`onmt.Models.DecoderState`):\n\n * decoder output `[tgt_len x batch x hidden]`\n * dictionary attention dists of `[tgt_len x batch x src_len]`\n * final decoder state\n \"\"\"\n tgt = tgt[:-1] # exclude last target from inputs\n\n enc_final, memory_bank = self.encoder(src, batch_size, lengths)\n enc_state = Variable(enc_final[0].data.new(enc_final[0].size()).zero_().unsqueeze(0).repeat(self.decoder.num_layers, 1, 2))\n enc_state = RNNDecoderState(self.decoder.hidden_size, enc_state)\n #self.decoder.init_decoder_state(src, memory_bank, enc_final)\n self.decoder.batch_size = batch_size\n decoder_outputs, attns, dec_state = \\\n self.decoder(tgt, memory_bank, batch_size,\n enc_state if dec_state is None\n else dec_state,\n memory_lengths=lengths[1])\n if self.multigpu:\n # Not yet supported on multi-gpu\n dec_state = None\n attns = None\n return decoder_outputs, attns, dec_state\n\n"
] | [
[
"torch.nn.Dropout",
"torch.cat",
"torch.nn.utils.rnn.pack_padded_sequence",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.nn.utils.rnn.pad_packed_sequence",
"torch.nn.functional.relu",
"torch.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
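Much of the third row's `Models.py` revolves around packing variable-length batches before running the encoder RNN and unpacking the outputs afterwards. The fragment below is a standalone plain-PyTorch sketch of that pack/unpack pattern with toy sizes and no onmt dependency; it is not the project's own API.

```python
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence as pack
from torch.nn.utils.rnn import pad_packed_sequence as unpack

rnn = nn.LSTM(input_size=8, hidden_size=16, num_layers=1)

emb = torch.randn(5, 3, 8)   # [src_len x batch x emb_dim], padded
lengths = [5, 4, 2]          # true lengths per sequence, sorted descending

packed = pack(emb, lengths)            # lets the RNN skip padded positions
memory_bank, (h_n, c_n) = rnn(packed)
memory_bank, _ = unpack(memory_bank)   # back to [src_len x batch x hidden]

print(memory_bank.shape)  # torch.Size([5, 3, 16])
```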
zhondr/cuda-fft | [
"3d1f29e9cd4e289773cf1a556f853bc94346a768"
] | [
"plot_AWS_FFT.py"
] | [
"import numpy as np\nimport matplotlib.pyplot as plt\n\ndata_file1 = np.loadtxt('CpuTimeData_onlyfft_AWS_c5.txt',delimiter='\t')\ndata_file2 = np.loadtxt('GpuTimeData_onlyfft_AWS_p3.txt',delimiter='\t')\ndata_file3 = np.loadtxt('FpgaTimeData_onlyfft.txt',delimiter='\t')\n\nfftsize = data_file1[:,0]\ncputime = data_file1[:,1]\ngputime = data_file2[:,1]\nfpgatime = data_file3[:,1]\n\nplt.plot(fftsize,cputime,fftsize,gputime,fftsize,fpgatime)\nplt.legend(['CPU','GPU','FPGA'],loc='best')\n# plt.plot(fftsize,cputime,fftsize,gputime)\n# plt.legend(['CPU','GPU'],loc='best')\nplt.xlabel('fftsize')\nplt.ylabel('time in microseconds (µs)')\nplt.show()\n\nplt.semilogy(fftsize,cputime,fftsize,gputime,fftsize,fpgatime)\nplt.legend(['CPU','GPU','FPGA'],loc='best')\n# plt.semilogy(fftsize,cputime,fftsize,gputime)\n# plt.legend(['CPU','GPU'],loc='best')\nplt.xlabel('fftsize')\nplt.ylabel('time in microseconds (µs)')\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.semilogy",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
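The fourth row's plotting script expects tab-delimited text files with the FFT size in column 0 and the measured time in microseconds in column 1. A hypothetical snippet for producing such a file with entirely synthetic timings, only so the plot code can be smoke-tested, might look like this.

```python
import numpy as np

# Assumed FFT sizes and synthetic O(N log N)-shaped timings in microseconds;
# real measurements would come from the CUDA/FPGA benchmarks, not this formula.
fftsize = 2 ** np.arange(4, 15)
time_us = 0.05 * fftsize * np.log2(fftsize)

np.savetxt("CpuTimeData_onlyfft_AWS_c5.txt",
           np.column_stack([fftsize, time_us]),
           delimiter="\t")
```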
wgifford/ray | [
"8acb469b047cd9b327c9477a13b030eb7357860e"
] | [
"python/ray/data/datasource/file_based_datasource.py"
] | [
"import logging\nfrom typing import Optional, List, Tuple, Union, Any, TYPE_CHECKING\nfrom urllib.parse import urlparse\n\nif TYPE_CHECKING:\n import pyarrow\n\nfrom ray.data.impl.arrow_block import (ArrowRow, DelegatingArrowBlockBuilder)\nfrom ray.data.impl.block_list import BlockMetadata\nfrom ray.data.datasource.datasource import Datasource, ReadTask\nfrom ray.util.annotations import DeveloperAPI\nfrom ray.data.impl.util import _check_pyarrow_version\n\nlogger = logging.getLogger(__name__)\n\n\n@DeveloperAPI\nclass FileBasedDatasource(Datasource[Union[ArrowRow, Any]]):\n \"\"\"File-based datasource, for reading and writing files.\n\n This class should not be used directly, and should instead be subclassed\n and tailored to particular file formats. Classes deriving from this class\n must implement _read_file().\n\n Current subclasses: JSONDatasource, CSVDatasource\n \"\"\"\n\n def prepare_read(\n self,\n parallelism: int,\n paths: Union[str, List[str]],\n filesystem: Optional[\"pyarrow.fs.FileSystem\"] = None,\n schema: Optional[Union[type, \"pyarrow.lib.Schema\"]] = None,\n **reader_args) -> List[ReadTask]:\n \"\"\"Creates and returns read tasks for a file-based datasource.\n \"\"\"\n _check_pyarrow_version()\n import pyarrow as pa\n import numpy as np\n\n paths, filesystem = _resolve_paths_and_filesystem(paths, filesystem)\n paths, file_infos = _expand_paths(paths, filesystem)\n file_sizes = [file_info.size for file_info in file_infos]\n\n read_file = self._read_file\n\n filesystem = _wrap_s3_serialization_workaround(filesystem)\n\n def read_files(\n read_paths: List[str],\n fs: Union[\"pyarrow.fs.FileSystem\", _S3FileSystemWrapper]):\n logger.debug(f\"Reading {len(read_paths)} files.\")\n if isinstance(fs, _S3FileSystemWrapper):\n fs = fs.unwrap()\n builder = DelegatingArrowBlockBuilder()\n for read_path in read_paths:\n with fs.open_input_stream(read_path) as f:\n data = read_file(f, read_path, **reader_args)\n if isinstance(data, pa.Table) or isinstance(\n data, np.ndarray):\n builder.add_block(data)\n else:\n builder.add(data)\n return builder.build()\n\n read_tasks = []\n for read_paths, file_sizes in zip(\n np.array_split(paths, parallelism),\n np.array_split(file_sizes, parallelism)):\n if len(read_paths) <= 0:\n continue\n\n if self._rows_per_file() is None:\n num_rows = None\n else:\n num_rows = len(read_paths) * self._rows_per_file()\n read_task = ReadTask(\n lambda read_paths=read_paths: read_files(\n read_paths, filesystem),\n BlockMetadata(\n num_rows=num_rows,\n size_bytes=sum(file_sizes),\n schema=schema,\n input_files=read_paths)\n )\n read_tasks.append(read_task)\n\n return read_tasks\n\n def _rows_per_file(self):\n \"\"\"Returns the number of rows per file, or None if unknown.\n \"\"\"\n return None\n\n def _read_file(self, f: \"pyarrow.NativeFile\", path: str, **reader_args):\n \"\"\"Reads a single file, passing all kwargs to the reader.\n\n This method should be implemented by subclasses.\n \"\"\"\n raise NotImplementedError(\n \"Subclasses of FileBasedDatasource must implement _read_files().\")\n\n\n# TODO(Clark): Add unit test coverage of _resolve_paths_and_filesystem and\n# _expand_paths.\n\n\ndef _resolve_paths_and_filesystem(\n paths: Union[str, List[str]],\n filesystem: \"pyarrow.fs.FileSystem\" = None,\n) -> Tuple[List[str], \"pyarrow.fs.FileSystem\"]:\n \"\"\"\n Resolves and normalizes all provided paths, infers a filesystem from the\n paths and ensures that all paths use the same filesystem.\n\n Args:\n paths: A single file/directory path or a list of 
file/directory paths.\n A list of paths can contain both files and directories.\n filesystem: The filesystem implementation that should be used for\n reading these files. If None, a filesystem will be inferred. If not\n None, the provided filesystem will still be validated against all\n filesystems inferred from the provided paths to ensure\n compatibility.\n \"\"\"\n from pyarrow.fs import FileSystem, PyFileSystem, FSSpecHandler, \\\n _resolve_filesystem_and_path\n import fsspec\n\n if isinstance(paths, str):\n paths = [paths]\n elif (not isinstance(paths, list)\n or any(not isinstance(p, str) for p in paths)):\n raise ValueError(\n \"paths must be a path string or a list of path strings.\")\n elif len(paths) == 0:\n raise ValueError(\"Must provide at least one path.\")\n\n if filesystem and not isinstance(filesystem, FileSystem):\n if not isinstance(filesystem, fsspec.spec.AbstractFileSystem):\n raise TypeError(f\"The filesystem passed must either conform to \"\n f\"pyarrow.fs.FileSystem, or \"\n f\"fsspec.spec.AbstractFileSystem. The provided \"\n f\"filesystem was: {filesystem}\")\n filesystem = PyFileSystem(FSSpecHandler(filesystem))\n\n resolved_paths = []\n for path in paths:\n if filesystem is not None:\n # If we provide a filesystem, _resolve_filesystem_and_path will not\n # slice off the protocol from the provided URI/path when resolved.\n path = _unwrap_protocol(path)\n resolved_filesystem, resolved_path = _resolve_filesystem_and_path(\n path, filesystem)\n if filesystem is None:\n filesystem = resolved_filesystem\n resolved_path = filesystem.normalize_path(resolved_path)\n resolved_paths.append(resolved_path)\n\n return resolved_paths, filesystem\n\n\ndef _expand_paths(paths: Union[str, List[str]],\n filesystem: \"pyarrow.fs.FileSystem\"):\n \"\"\"\n Expands all provided paths into concrete file paths by walking directories.\n Also returns a sidecar of file infos.\n\n This should be used on the output of _resolve_paths_and_filesystem.\n\n Args:\n paths: A single file/directory path or a list of file/directory paths.\n A list of paths can contain both files and directories. These paths\n should be properly resolved, e.g. the paths returned from\n _resolve_paths_and_filesystem.\n filesystem: The filesystem implementation that should be used for\n reading these files.\n \"\"\"\n from pyarrow.fs import FileType\n expanded_paths = []\n file_infos = []\n for path in paths:\n file_info = filesystem.get_file_info(path)\n if file_info.type == FileType.Directory:\n paths, file_infos_ = _expand_directory(path, filesystem)\n expanded_paths.extend(paths)\n file_infos.extend(file_infos_)\n elif file_info.type == FileType.File:\n expanded_paths.append(path)\n file_infos.append(file_info)\n else:\n raise FileNotFoundError(path)\n return expanded_paths, file_infos\n\n\ndef _expand_directory(path: str,\n filesystem: \"pyarrow.fs.FileSystem\",\n exclude_prefixes: List[str] = [\".\", \"_\"]) -> List[str]:\n \"\"\"\n Expand the provided directory path to a list of file paths.\n\n Args:\n path: The directory path to expand.\n filesystem: The filesystem implementation that should be used for\n reading these files.\n exclude_prefixes: The file relative path prefixes that should be\n excluded from the returned file set. 
Default excluded prefixes are\n \".\" and \"_\".\n\n Returns:\n A list of file paths contained in the provided directory.\n \"\"\"\n from pyarrow.fs import FileSelector\n selector = FileSelector(path, recursive=True)\n files = filesystem.get_file_info(selector)\n base_path = selector.base_dir\n filtered_paths = []\n for file_ in files:\n if not file_.is_file:\n continue\n file_path = file_.path\n if not file_path.startswith(base_path):\n continue\n relative = file_path[len(base_path):]\n if any(relative.startswith(prefix) for prefix in [\".\", \"_\"]):\n continue\n filtered_paths.append((file_path, file_))\n # We sort the paths to guarantee a stable order.\n return zip(*sorted(filtered_paths, key=lambda x: x[0]))\n\n\ndef _unwrap_protocol(path):\n \"\"\"\n Slice off any protocol prefixes on path.\n \"\"\"\n parsed = urlparse(path)\n return parsed.netloc + parsed.path\n\n\ndef _wrap_s3_serialization_workaround(filesystem: \"pyarrow.fs.FileSystem\"):\n # This is needed because pa.fs.S3FileSystem assumes pa.fs is already\n # imported before deserialization. See #17085.\n import pyarrow as pa\n if isinstance(filesystem, pa.fs.S3FileSystem):\n return _S3FileSystemWrapper(filesystem)\n return filesystem\n\n\nclass _S3FileSystemWrapper:\n def __init__(self, fs: \"pyarrow.fs.S3FileSystem\"):\n self._fs = fs\n\n def unwrap(self):\n return self._fs\n\n @classmethod\n def _reconstruct(cls, fs_reconstruct, fs_args):\n # Implicitly trigger S3 subsystem initialization by importing\n # pyarrow.fs.\n import pyarrow.fs # noqa: F401\n\n return cls(fs_reconstruct(*fs_args))\n\n def __reduce__(self):\n return _S3FileSystemWrapper._reconstruct, self._fs.__reduce__()\n"
] | [
[
"numpy.array_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
WildElaeagnus/Adjustment-calculator | [
"564ab7bd94c5117e93f47ca2579621f9122f1d63"
] | [
"adjustment_calculator.py"
] | [
"# рассчет корректировки из эксель файла\n\nfrom numpy import NaN\nimport pandas as pd\nimport re\nfrom collections import namedtuple\n\nfrom file_browser import *\n\n# DEBUG = True\nDEBUG = False\n\nxl_columns = namedtuple('xl_columns', 'data tu corr')\ncol = xl_columns(2, 8, 10)\n\ncols_names = namedtuple('cols_names', '''\n rp \n col \n sum \n numc \n num \n tu \n dateCF \n typeCF \n price''')\ncol_names = cols_names('Расчетный период', \n 'Количество', \n 'Сумма', \n 'Номенклатура.Код', \n 'Номенклатура', \n 'Теплоустановка', \n 'Дата СФ', \n 'Вид СФ', \n 'Цена')\nif(DEBUG):\n # only worked after $pip install openpyxl\n filepath = 'C:/Users/akorz/Desktop/Python_code/Adjustment-calculator/test_files/10. Свод начислений ТЭ2100-00812 с учетом кор-ки от 31.07.21.xlsx'\n # filepath = 'C:/Users/akorz/Desktop/Python_code/Adjustment-calculator/test_files/10. Свод начислений 717108ОДН.xlsx'\n \n df = pd.read_excel(filepath, index_col=0,) \n\nif(not DEBUG):\n fb = file_browser_()\n fb.file_browser_()\n df = pd.read_excel(fb.filename, index_col=0, ) \n\nadj = df.iloc[:, [col.data, col.tu, col.corr]]\n\npog = adj.iloc[:, [2]]\n \ndef find_in_df(string_to_find):\n pos_list_ = []\n count = 0\n for pos, i in enumerate(pog.iterrows(), start=1):\n for j in i:\n if (str(j).find(string_to_find) > -1 ):\n count += 1\n pos_list_.append(pos)\n return pos_list_\n\n# найти номера строк, данные из которых надо пересчитать\ncorr_str = \"Корректировочный СФ\"\ncorr_str_ = \"Исправление СФ\"\ncorr_list = [corr_str, corr_str_]\n# pos_list_corr = find_in_df(corr_str)\n\n# найти первую ячейку в ряде заголовков столбцов\npos_lbs = find_in_df(\"Расчетный период\")\n\ndf = df.reset_index()\n# убрать ненужные строки в начале\ndf = df.drop(range(pos_lbs[0]-1))\n# поставить имена столбцов\ndf = df.rename(columns=df.iloc[0])\n# если столбец имеет НаН то надо его удалить\ndf = df.loc[:, df.columns.notnull()]\ndf = df.reset_index()\ndf = df.drop(['index'], axis=1)\n\ndfp = df.sort_values([col_names.rp, col_names.tu, col_names.numc])\n\ndfpi = dfp.reset_index()\n# убираем пустые значения из столба количество\ndfpi.dropna(subset = [col_names.col], inplace=True)\ndfpi = dfpi[[col_names.rp, col_names.tu, col_names.col, col_names.numc, col_names.typeCF]]\ndfpi2 = dfpi\nwith pd.ExcelWriter(\"before.xlsx\") as writer:\n dfpi2.to_excel(writer, header=True, index=False, sheet_name='before' )\n\n# извлекает номер ТУ из ячейки\ndef re_str(cell):\n return (str(re.findall(r'\\d+'+'_', str(cell)))[2:-3])\n# список со строками в которых лежат исправления\ni_pos = []\nfor i, row in dfpi.iterrows():\n if (row[col_names.typeCF] == corr_str or row[col_names.typeCF] == corr_str_):\n i_pos.append(i)\n# список индексов с рядами, которые надо удалить в конце работы программы\ndrop_list = []\n\nprint(dfpi)\nprint(i_pos)\nfor i in i_pos:\n # расчетный период совпадает выше\n if (dfpi.at[i, col_names.rp] == dfpi.at[i-1, col_names.rp]):\n # если номер ТУ совпадает с номером ячейки ВЫШЕ\n if re_str(dfpi.at[i, col_names.tu]) == re_str(dfpi.at[i-1, col_names.tu]):\n # их номера номенклатура код совпадают\n if (dfpi.at[i, col_names.numc] == dfpi.at[i-1, col_names.numc]):\n # если вид СФ верхней ячейки пусто то надо туда добавить колличество из нижней\n # а текущую удалить\n if dfpi.isnull().at[i-1, col_names.typeCF]: \n dfpi.at[i-1, col_names.col] += dfpi.at[i, col_names.col]\n drop_list.append(i)\n # изменить ячейку, чтоб при сравнении с нижними она не учитывалась\n dfpi.at[i, col_names.typeCF] = 'solved'\n # расчетный период совпадает ниже\n if 
(dfpi.at[i, col_names.rp] == dfpi.at[i+1, col_names.rp]):\n # если ниже номр ТУ совпадает\n if (re_str(dfpi.at[i, col_names.tu]) == re_str(dfpi.at[i+1, col_names.tu])):\n # их номера номенклатура код совпадают\n if (dfpi.at[i, col_names.numc] == dfpi.at[i+1, col_names.numc]):\n # если вид СФ нижней ячейки пусто то надо туда добавить колличество из нижней\n # а текущую удалить\n if dfpi.isnull().at[i+1, col_names.typeCF]: \n dfpi.at[i+1, col_names.col] += dfpi.at[i, col_names.col]\n drop_list.append(i)\n dfpi.at[i, col_names.typeCF] = 'solved'\n # если ячейка ниже = Корректировочный, то в нее добавляем данные из этой ячейки\n # и ставим значение next line\n if dfpi.at[i+1, col_names.typeCF] == corr_str :\n dfpi.at[i+1, col_names.col] += dfpi.at[i, col_names.col]\n drop_list.append(i)\n dfpi.at[i, col_names.typeCF] = 'next line'\n # # если ячейка ВЫШЕ имеет значение next line, а ниже ЕСТЬ корректировочный \n # # то надо ... в теории то же самое что и без next line\n # if (dfpi.at[i+1, col_names.typeCF] == corr_str) and (dfpi.at[i-1, col_names.typeCF] == 'next line'):\n \n # соответсвенно если ячейка ВЫШЕ имеет значение next line, а ниже значения корректировочный для \n # этой ТУ уже нет или номенклатура код другая, то надо:\n # очистить Вид СФ\n # НЕ добавлять в лист на удаление drop_list\n # записать сумму верхней и текущей в столбце колличество\n if (dfpi.at[i-1, col_names.typeCF] == 'next line') :\n if (dfpi.at[i, col_names.typeCF] != 'next line'):\n dfpi.at[i, col_names.typeCF] = NaN\n\n\n# пометить все неудачные рассчеты как невыполненные\ndfpi.loc[dfpi[col_names.typeCF] == corr_str, [col_names.typeCF]] = 'unable to solve'\ndfpi.loc[dfpi[col_names.typeCF] == corr_str_, [col_names.typeCF]] = 'unable to solve'\n\n# в конце удаляем строки ненужные +НаН в колонке количество\nwith pd.ExcelWriter(\"after.xlsx\") as writer:\n dfpi.to_excel(writer, header=True, index=False, sheet_name='after' )\nprint(dfpi)\n \ndfpi = dfpi.drop(drop_list, axis=0)\ndfpi = dfpi[dfpi[\"Расчетный период\"] != \"Расчетный период\"]\n\n# убрать пустые значения из столба теплоустановок\ndfpi.dropna(subset=[col_names.tu], inplace=True)\n\ndfpi.loc[\"Total\", col_names.col] = dfpi[col_names.col].sum()\ndfpi.at[\"Total\",col_names.rp] = \"Total\"\nwith pd.ExcelWriter(\"output.xlsx\") as writer:\n dfpi.to_excel(writer, header=True, index=False, )\n\nif not DEBUG: input()\n"
] | [
[
"pandas.read_excel",
"pandas.ExcelWriter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
percygautam/arviz | [
"e355c1b0d2dcf0fb0f1914370295915a73bd1dff"
] | [
"arviz/tests/base_tests/test_plots_bokeh.py"
] | [
"# pylint: disable=redefined-outer-name,too-many-lines\n\"\"\"Tests use the 'bokeh' backend.\"\"\"\nfrom copy import deepcopy\n\nimport numpy as np\nimport pytest\nfrom pandas import DataFrame # pylint: disable=wrong-import-position\n\nfrom ...data import from_dict, load_arviz_data # pylint: disable=wrong-import-position\nfrom ...plots import ( # pylint: disable=wrong-import-position\n plot_autocorr,\n plot_bpv,\n plot_compare,\n plot_density,\n plot_dist,\n plot_dist_comparison,\n plot_elpd,\n plot_energy,\n plot_ess,\n plot_forest,\n plot_hdi,\n plot_joint,\n plot_kde,\n plot_khat,\n plot_loo_pit,\n plot_mcse,\n plot_pair,\n plot_parallel,\n plot_posterior,\n plot_ppc,\n plot_rank,\n plot_separation,\n plot_trace,\n plot_violin,\n)\nfrom ...rcparams import rc_context, rcParams # pylint: disable=wrong-import-position\nfrom ...stats import compare, hdi, loo, waic # pylint: disable=wrong-import-position\nfrom ..helpers import ( # pylint: disable=unused-import, wrong-import-position\n create_model,\n create_multidimensional_model,\n eight_schools_params,\n importorskip,\n models,\n multidim_models,\n)\n\n# Skip tests if bokeh not installed\nbkp = importorskip(\"bokeh.plotting\") # pylint: disable=invalid-name\n\n\nrcParams[\"data.load\"] = \"eager\"\n\n\[email protected](scope=\"module\")\ndef data(eight_schools_params):\n data = eight_schools_params\n return data\n\n\[email protected](scope=\"module\")\ndef df_trace():\n return DataFrame({\"a\": np.random.poisson(2.3, 100)})\n\n\[email protected](scope=\"module\")\ndef discrete_model():\n \"\"\"Simple fixture for random discrete model\"\"\"\n return {\"x\": np.random.randint(10, size=100), \"y\": np.random.randint(10, size=100)}\n\n\[email protected](scope=\"module\")\ndef continuous_model():\n \"\"\"Simple fixture for random continuous model\"\"\"\n return {\"x\": np.random.beta(2, 5, size=100), \"y\": np.random.beta(2, 5, size=100)}\n\n\[email protected](\n \"kwargs\",\n [\n {\"point_estimate\": \"mean\"},\n {\"point_estimate\": \"median\"},\n {\"hdi_prob\": 0.94},\n {\"hdi_prob\": 1},\n {\"outline\": True},\n {\"hdi_markers\": [\"v\"]},\n {\"shade\": 1},\n ],\n)\ndef test_plot_density_float(models, kwargs):\n obj = [getattr(models, model_fit) for model_fit in [\"model_1\", \"model_2\"]]\n axes = plot_density(obj, backend=\"bokeh\", show=False, **kwargs)\n assert axes.shape[0] >= 6\n assert axes.shape[0] >= 3\n\n\ndef test_plot_density_discrete(discrete_model):\n axes = plot_density(discrete_model, shade=0.9, backend=\"bokeh\", show=False)\n assert axes.shape[0] == 1\n\n\ndef test_plot_density_no_subset():\n \"\"\"Test plot_density works when variables are not subset of one another (#1093).\"\"\"\n model_ab = from_dict(\n {\n \"a\": np.random.normal(size=200),\n \"b\": np.random.normal(size=200),\n }\n )\n model_bc = from_dict(\n {\n \"b\": np.random.normal(size=200),\n \"c\": np.random.normal(size=200),\n }\n )\n axes = plot_density([model_ab, model_bc])\n assert axes.size == 3\n\n\ndef test_plot_density_bad_kwargs(models):\n obj = [getattr(models, model_fit) for model_fit in [\"model_1\", \"model_2\"]]\n with pytest.raises(ValueError):\n plot_density(obj, point_estimate=\"bad_value\", backend=\"bokeh\", show=False)\n\n with pytest.raises(ValueError):\n plot_density(\n obj,\n data_labels=[\"bad_value_{}\".format(i) for i in range(len(obj) + 10)],\n backend=\"bokeh\",\n show=False,\n )\n\n with pytest.raises(ValueError):\n plot_density(obj, hdi_prob=2, backend=\"bokeh\", show=False)\n\n\[email protected](\n \"kwargs\",\n [\n {},\n 
{\"y_hat_line\": True},\n {\"expected_events\": True},\n {\"y_hat_line_kwargs\": {\"linestyle\": \"dotted\"}},\n {\"exp_events_kwargs\": {\"marker\": \"o\"}},\n ],\n)\ndef test_plot_separation(kwargs):\n idata = load_arviz_data(\"classification10d\")\n ax = plot_separation(idata=idata, y=\"outcome\", backend=\"bokeh\", show=False, **kwargs)\n assert ax\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"var_names\": \"mu\"},\n {\"var_names\": [\"mu\", \"tau\"]},\n {\"combined\": True, \"rug\": True},\n {\"compact\": True, \"legend\": True},\n {\"combined\": True, \"compact\": True, \"legend\": True},\n {\"divergences\": \"top\"},\n {\"divergences\": False},\n {\"kind\": \"rank_vlines\"},\n {\"kind\": \"rank_bars\"},\n {\"lines\": [(\"mu\", {}, [1, 2])]},\n {\"lines\": [(\"mu\", {}, 8)]},\n ],\n)\ndef test_plot_trace(models, kwargs):\n axes = plot_trace(models.model_1, backend=\"bokeh\", show=False, **kwargs)\n assert axes.shape\n\n\ndef test_plot_trace_discrete(discrete_model):\n axes = plot_trace(discrete_model, backend=\"bokeh\", show=False)\n assert axes.shape\n\n\ndef test_plot_trace_max_subplots_warning(models):\n with pytest.warns(UserWarning):\n with rc_context(rc={\"plot.max_subplots\": 2}):\n axes = plot_trace(models.model_1, backend=\"bokeh\", show=False)\n assert axes.shape\n\n\[email protected](\n \"kwargs\",\n [\n {\"plot_kwargs\": {\"line_dash\": \"solid\"}},\n {\"contour\": True, \"fill_last\": False},\n {\n \"contour\": True,\n \"contourf_kwargs\": {\"cmap\": \"plasma\"},\n \"contour_kwargs\": {\"line_width\": 1},\n },\n {\"contour\": False},\n {\"contour\": False, \"pcolormesh_kwargs\": {\"cmap\": \"plasma\"}},\n ],\n)\ndef test_plot_kde(continuous_model, kwargs):\n axes = plot_kde(\n continuous_model[\"x\"], continuous_model[\"y\"], backend=\"bokeh\", show=False, **kwargs\n )\n assert axes\n\n\[email protected](\n \"kwargs\",\n [\n {\"cumulative\": True},\n {\"cumulative\": True, \"plot_kwargs\": {\"line_dash\": \"dashed\"}},\n {\"rug\": True},\n {\"rug\": True, \"rug_kwargs\": {\"line_alpha\": 0.2}, \"rotated\": True},\n ],\n)\ndef test_plot_kde_cumulative(continuous_model, kwargs):\n axes = plot_kde(continuous_model[\"x\"], backend=\"bokeh\", show=False, **kwargs)\n assert axes\n\n\[email protected](\n \"kwargs\",\n [\n {\"kind\": \"hist\"},\n {\"kind\": \"kde\"},\n {\"is_circular\": False},\n {\"is_circular\": False, \"kind\": \"hist\"},\n {\"is_circular\": True},\n {\"is_circular\": True, \"kind\": \"hist\"},\n {\"is_circular\": \"radians\"},\n {\"is_circular\": \"radians\", \"kind\": \"hist\"},\n {\"is_circular\": \"degrees\"},\n {\"is_circular\": \"degrees\", \"kind\": \"hist\"},\n ],\n)\ndef test_plot_dist(continuous_model, kwargs):\n axes = plot_dist(continuous_model[\"x\"], backend=\"bokeh\", show=False, **kwargs)\n assert axes\n\n\ndef test_plot_kde_1d(continuous_model):\n axes = plot_kde(continuous_model[\"y\"], backend=\"bokeh\", show=False)\n assert axes\n\n\[email protected](\n \"kwargs\",\n [\n {\"contour\": True, \"fill_last\": False},\n {\"contour\": True, \"contourf_kwargs\": {\"cmap\": \"plasma\"}},\n {\"contour\": False},\n {\"contour\": False, \"pcolormesh_kwargs\": {\"cmap\": \"plasma\"}},\n ],\n)\ndef test_plot_kde_2d(continuous_model, kwargs):\n axes = plot_kde(\n continuous_model[\"x\"], continuous_model[\"y\"], backend=\"bokeh\", show=False, **kwargs\n )\n assert axes\n\n\[email protected](\n \"kwargs\", [{\"plot_kwargs\": {\"line_dash\": \"solid\"}}, {\"cumulative\": True}, {\"rug\": True}]\n)\ndef test_plot_kde_quantiles(continuous_model, 
kwargs):\n axes = plot_kde(\n continuous_model[\"x\"], quantiles=[0.05, 0.5, 0.95], backend=\"bokeh\", show=False, **kwargs\n )\n assert axes\n\n\ndef test_plot_autocorr_short_chain():\n \"\"\"Check that logic for small chain defaulting doesn't cause exception\"\"\"\n chain = np.arange(10)\n axes = plot_autocorr(chain, backend=\"bokeh\", show=False)\n assert axes\n\n\ndef test_plot_autocorr_uncombined(models):\n axes = plot_autocorr(models.model_1, combined=False, backend=\"bokeh\", show=False)\n assert axes.shape[0] == 10\n max_subplots = (\n np.inf if rcParams[\"plot.max_subplots\"] is None else rcParams[\"plot.max_subplots\"]\n )\n assert len([ax for ax in axes.ravel() if ax is not None]) == min(72, max_subplots)\n\n\ndef test_plot_autocorr_combined(models):\n axes = plot_autocorr(models.model_1, combined=True, backend=\"bokeh\", show=False)\n assert axes.shape[0] == 6\n assert axes.shape[1] == 3\n\n\[email protected](\"var_names\", (None, \"mu\", [\"mu\", \"tau\"]))\ndef test_plot_autocorr_var_names(models, var_names):\n axes = plot_autocorr(\n models.model_1, var_names=var_names, combined=True, backend=\"bokeh\", show=False\n )\n assert axes.shape\n\n\[email protected](\n \"kwargs\", [{\"insample_dev\": False}, {\"plot_standard_error\": False}, {\"plot_ic_diff\": False}]\n)\ndef test_plot_compare(models, kwargs):\n\n model_compare = compare({\"Model 1\": models.model_1, \"Model 2\": models.model_2})\n\n axes = plot_compare(model_compare, backend=\"bokeh\", show=False, **kwargs)\n assert axes\n\n\ndef test_plot_compare_manual(models):\n \"\"\"Test compare plot without scale column\"\"\"\n model_compare = compare({\"Model 1\": models.model_1, \"Model 2\": models.model_2})\n\n # remove \"scale\" column\n del model_compare[\"loo_scale\"]\n axes = plot_compare(model_compare, backend=\"bokeh\", show=False)\n assert axes\n\n\ndef test_plot_compare_no_ic(models):\n \"\"\"Check exception is raised if model_compare doesn't contain a valid information criterion\"\"\"\n model_compare = compare({\"Model 1\": models.model_1, \"Model 2\": models.model_2})\n\n # Drop column needed for plotting\n model_compare = model_compare.drop(\"loo\", axis=1)\n with pytest.raises(ValueError) as err:\n plot_compare(model_compare, backend=\"bokeh\", show=False)\n\n assert \"comp_df must contain one of the following\" in str(err.value)\n assert \"['loo', 'waic']\" in str(err.value)\n\n\[email protected](\"kwargs\", [{}, {\"ic\": \"loo\"}, {\"xlabels\": True, \"scale\": \"log\"}])\[email protected](\"add_model\", [False, True])\[email protected](\"use_elpddata\", [False, True])\ndef test_plot_elpd(models, add_model, use_elpddata, kwargs):\n model_dict = {\"Model 1\": models.model_1, \"Model 2\": models.model_2}\n if add_model:\n model_dict[\"Model 3\"] = create_model(seed=12)\n\n if use_elpddata:\n ic = kwargs.get(\"ic\", \"waic\")\n scale = kwargs.get(\"scale\", \"deviance\")\n if ic == \"waic\":\n model_dict = {k: waic(v, scale=scale, pointwise=True) for k, v in model_dict.items()}\n else:\n model_dict = {k: loo(v, scale=scale, pointwise=True) for k, v in model_dict.items()}\n\n axes = plot_elpd(model_dict, backend=\"bokeh\", show=False, **kwargs)\n assert np.any(axes)\n if add_model:\n assert axes.shape[0] == axes.shape[1]\n assert axes.shape[0] == len(model_dict) - 1\n\n\[email protected](\"kwargs\", [{}, {\"ic\": \"loo\"}, {\"xlabels\": True, \"scale\": \"log\"}])\[email protected](\"add_model\", [False, True])\[email protected](\"use_elpddata\", [False, True])\ndef test_plot_elpd_multidim(multidim_models, 
add_model, use_elpddata, kwargs):\n model_dict = {\"Model 1\": multidim_models.model_1, \"Model 2\": multidim_models.model_2}\n if add_model:\n model_dict[\"Model 3\"] = create_multidimensional_model(seed=12)\n\n if use_elpddata:\n ic = kwargs.get(\"ic\", \"waic\")\n scale = kwargs.get(\"scale\", \"deviance\")\n if ic == \"waic\":\n model_dict = {k: waic(v, scale=scale, pointwise=True) for k, v in model_dict.items()}\n else:\n model_dict = {k: loo(v, scale=scale, pointwise=True) for k, v in model_dict.items()}\n\n axes = plot_elpd(model_dict, backend=\"bokeh\", show=False, **kwargs)\n assert np.any(axes)\n if add_model:\n assert axes.shape[0] == axes.shape[1]\n assert axes.shape[0] == len(model_dict) - 1\n\n\[email protected](\"kind\", [\"kde\", \"hist\"])\ndef test_plot_energy(models, kind):\n assert plot_energy(models.model_1, kind=kind, backend=\"bokeh\", show=False)\n\n\ndef test_plot_energy_bad(models):\n with pytest.raises(ValueError):\n plot_energy(models.model_1, kind=\"bad_kind\", backend=\"bokeh\", show=False)\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"var_names\": [\"theta\"], \"relative\": True, \"color\": \"r\"},\n {\"coords\": {\"theta_dim_0\": slice(4)}, \"n_points\": 10},\n {\"min_ess\": 600, \"hline_kwargs\": {\"color\": \"r\"}},\n ],\n)\[email protected](\"kind\", [\"local\", \"quantile\", \"evolution\"])\ndef test_plot_ess(models, kind, kwargs):\n \"\"\"Test plot_ess arguments common to all kind of plots.\"\"\"\n idata = models.model_1\n ax = plot_ess(idata, kind=kind, backend=\"bokeh\", show=False, **kwargs)\n assert np.all(ax)\n\n\[email protected](\n \"kwargs\",\n [\n {\"rug\": True},\n {\"rug\": True, \"rug_kind\": \"max_depth\", \"rug_kwargs\": {\"color\": \"c\"}},\n {\"extra_methods\": True},\n {\"extra_methods\": True, \"extra_kwargs\": {\"ls\": \":\"}, \"text_kwargs\": {\"x\": 0, \"ha\": \"left\"}},\n {\"extra_methods\": True, \"rug\": True},\n ],\n)\[email protected](\"kind\", [\"local\", \"quantile\"])\ndef test_plot_ess_local_quantile(models, kind, kwargs):\n \"\"\"Test specific arguments in kinds local and quantile of plot_ess.\"\"\"\n idata = models.model_1\n ax = plot_ess(idata, kind=kind, backend=\"bokeh\", show=False, **kwargs)\n assert np.all(ax)\n\n\ndef test_plot_ess_evolution(models):\n \"\"\"Test specific arguments in evolution kind of plot_ess.\"\"\"\n idata = models.model_1\n ax = plot_ess(\n idata,\n kind=\"evolution\",\n extra_kwargs={\"linestyle\": \"--\"},\n color=\"b\",\n backend=\"bokeh\",\n show=False,\n )\n assert np.all(ax)\n\n\ndef test_plot_ess_bad_kind(models):\n \"\"\"Test error when plot_ess recieves an invalid kind.\"\"\"\n idata = models.model_1\n with pytest.raises(ValueError, match=\"Invalid kind\"):\n plot_ess(idata, kind=\"bad kind\", backend=\"bokeh\", show=False)\n\n\[email protected](\"dim\", [\"chain\", \"draw\"])\ndef test_plot_ess_bad_coords(models, dim):\n \"\"\"Test error when chain or dim are used as coords to select a data subset.\"\"\"\n idata = models.model_1\n with pytest.raises(ValueError, match=\"invalid coordinates\"):\n plot_ess(idata, coords={dim: slice(3)}, backend=\"bokeh\", show=False)\n\n\ndef test_plot_ess_no_sample_stats(models):\n \"\"\"Test error when rug=True but sample_stats group is not present.\"\"\"\n idata = models.model_1\n with pytest.raises(ValueError, match=\"must contain sample_stats\"):\n plot_ess(idata.posterior, rug=True, backend=\"bokeh\", show=False)\n\n\ndef test_plot_ess_no_divergences(models):\n \"\"\"Test error when rug=True, but the variable defined by rug_kind is 
missing.\"\"\"\n idata = deepcopy(models.model_1)\n idata.sample_stats = idata.sample_stats.rename({\"diverging\": \"diverging_missing\"})\n with pytest.raises(ValueError, match=\"not contain diverging\"):\n plot_ess(idata, rug=True, backend=\"bokeh\", show=False)\n\n\[email protected](\"model_fits\", [[\"model_1\"], [\"model_1\", \"model_2\"]])\[email protected](\n \"args_expected\",\n [\n ({}, 1),\n ({\"var_names\": \"mu\"}, 1),\n ({\"var_names\": \"mu\", \"rope\": (-1, 1)}, 1),\n ({\"r_hat\": True, \"quartiles\": False}, 2),\n ({\"var_names\": [\"mu\"], \"colors\": \"black\", \"ess\": True, \"combined\": True}, 2),\n (\n {\n \"kind\": \"ridgeplot\",\n \"ridgeplot_truncate\": False,\n \"ridgeplot_quantiles\": [0.25, 0.5, 0.75],\n },\n 1,\n ),\n ({\"kind\": \"ridgeplot\", \"r_hat\": True, \"ess\": True}, 3),\n ({\"kind\": \"ridgeplot\", \"r_hat\": True, \"ess\": True, \"ridgeplot_alpha\": 0}, 3),\n (\n {\n \"var_names\": [\"mu\", \"tau\"],\n \"rope\": {\"mu\": [{\"rope\": (-0.1, 0.1)}], \"tau\": [{\"rope\": (0.2, 0.5)}]},\n },\n 1,\n ),\n ],\n)\ndef test_plot_forest(models, model_fits, args_expected):\n obj = [getattr(models, model_fit) for model_fit in model_fits]\n args, expected = args_expected\n axes = plot_forest(obj, backend=\"bokeh\", show=False, **args)\n assert axes.shape == (1, expected)\n\n\ndef test_plot_forest_rope_exception():\n with pytest.raises(ValueError) as err:\n plot_forest({\"x\": [1]}, rope=\"not_correct_format\", backend=\"bokeh\", show=False)\n assert \"Argument `rope` must be None, a dictionary like\" in str(err.value)\n\n\ndef test_plot_forest_single_value():\n axes = plot_forest({\"x\": [1]}, backend=\"bokeh\", show=False)\n assert axes.shape\n\n\[email protected](\"model_fits\", [[\"model_1\"], [\"model_1\", \"model_2\"]])\ndef test_plot_forest_bad(models, model_fits):\n obj = [getattr(models, model_fit) for model_fit in model_fits]\n with pytest.raises(TypeError):\n plot_forest(obj, kind=\"bad_kind\", backend=\"bokeh\", show=False)\n\n with pytest.raises(ValueError):\n plot_forest(\n obj,\n model_names=[\"model_name_{}\".format(i) for i in range(len(obj) + 10)],\n backend=\"bokeh\",\n show=False,\n )\n\n\[email protected](\n \"kwargs\",\n [\n {\"color\": \"C5\", \"circular\": True},\n {\"hdi_data\": True, \"fill_kwargs\": {\"alpha\": 0}},\n {\"plot_kwargs\": {\"alpha\": 0}},\n {\"smooth_kwargs\": {\"window_length\": 33, \"polyorder\": 5, \"mode\": \"mirror\"}},\n {\"hdi_data\": True, \"smooth\": False, \"color\": \"xkcd:jade\"},\n ],\n)\ndef test_plot_hdi(models, data, kwargs):\n hdi_data = kwargs.pop(\"hdi_data\", None)\n y_data = models.model_1.posterior[\"theta\"]\n if hdi_data:\n hdi_data = hdi(y_data)\n axis = plot_hdi(data[\"y\"], hdi_data=hdi_data, backend=\"bokeh\", show=False, **kwargs)\n else:\n axis = plot_hdi(data[\"y\"], y_data, backend=\"bokeh\", show=False, **kwargs)\n assert axis\n\n\[email protected](\"kind\", [\"scatter\", \"hexbin\", \"kde\"])\ndef test_plot_joint(models, kind):\n axes = plot_joint(\n models.model_1, var_names=(\"mu\", \"tau\"), kind=kind, backend=\"bokeh\", show=False\n )\n assert axes[1, 0]\n\n\ndef test_plot_joint_ax_tuple(models):\n ax = plot_joint(models.model_1, var_names=(\"mu\", \"tau\"), backend=\"bokeh\", show=False)\n axes = plot_joint(models.model_2, var_names=(\"mu\", \"tau\"), ax=ax, backend=\"bokeh\", show=False)\n assert axes[1, 0]\n\n\ndef test_plot_joint_discrete(discrete_model):\n axes = plot_joint(discrete_model, backend=\"bokeh\", show=False)\n assert axes[1, 0]\n\n\ndef test_plot_joint_bad(models):\n 
with pytest.raises(ValueError):\n plot_joint(\n models.model_1, var_names=(\"mu\", \"tau\"), kind=\"bad_kind\", backend=\"bokeh\", show=False\n )\n\n with pytest.raises(Exception):\n plot_joint(models.model_1, var_names=(\"mu\", \"tau\", \"eta\"), backend=\"bokeh\", show=False)\n\n with pytest.raises(ValueError):\n _, axes = list(range(5))\n plot_joint(models.model_1, var_names=(\"mu\", \"tau\"), ax=axes, backend=\"bokeh\", show=False)\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"xlabels\": True},\n {\"color\": \"obs_dim\", \"xlabels\": True, \"show_bins\": True, \"bin_format\": \"{0}\"},\n {\"color\": \"obs_dim\", \"legend\": True, \"hover_label\": True},\n {\"color\": \"blue\", \"coords\": {\"obs_dim\": slice(2, 4)}},\n {\"color\": np.random.uniform(size=8), \"show_bins\": True},\n {\"color\": np.random.uniform(size=(8, 3)), \"show_bins\": True, \"annotate\": True},\n ],\n)\[email protected](\"input_type\", [\"elpd_data\", \"data_array\", \"array\"])\ndef test_plot_khat(models, input_type, kwargs):\n khats_data = loo(models.model_1, pointwise=True)\n\n if input_type == \"data_array\":\n khats_data = khats_data.pareto_k\n elif input_type == \"array\":\n khats_data = khats_data.pareto_k.values\n if \"color\" in kwargs and isinstance(kwargs[\"color\"], str) and kwargs[\"color\"] == \"obs_dim\":\n kwargs[\"color\"] = None\n\n axes = plot_khat(khats_data, backend=\"bokeh\", show=False, **kwargs)\n assert axes\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"xlabels\": True},\n {\"color\": \"dim1\", \"xlabels\": True, \"show_bins\": True, \"bin_format\": \"{0}\"},\n {\"color\": \"dim2\", \"legend\": True, \"hover_label\": True},\n {\"color\": \"blue\", \"coords\": {\"dim2\": slice(2, 4)}},\n {\"color\": np.random.uniform(size=35), \"show_bins\": True},\n {\"color\": np.random.uniform(size=(35, 3)), \"show_bins\": True, \"annotate\": True},\n ],\n)\[email protected](\"input_type\", [\"elpd_data\", \"data_array\", \"array\"])\ndef test_plot_khat_multidim(multidim_models, input_type, kwargs):\n khats_data = loo(multidim_models.model_1, pointwise=True)\n\n if input_type == \"data_array\":\n khats_data = khats_data.pareto_k\n elif input_type == \"array\":\n khats_data = khats_data.pareto_k.values\n if (\n \"color\" in kwargs\n and isinstance(kwargs[\"color\"], str)\n and kwargs[\"color\"] in (\"dim1\", \"dim2\")\n ):\n kwargs[\"color\"] = None\n\n axes = plot_khat(khats_data, backend=\"bokeh\", show=False, **kwargs)\n assert axes\n\n\ndef test_plot_khat_annotate():\n khats = np.array([0, 0, 0.6, 0.6, 0.8, 0.9, 0.9, 2, 3, 4, 1.5])\n axes = plot_khat(khats, annotate=True, backend=\"bokeh\", show=False)\n assert axes\n\n\ndef test_plot_khat_bad_input(models):\n with pytest.raises(ValueError):\n plot_khat(models.model_1.sample_stats, backend=\"bokeh\", show=False)\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"n_unif\": 50},\n {\"use_hdi\": True, \"color\": \"gray\"},\n {\"use_hdi\": True, \"credible_interval\": 0.68},\n {\"use_hdi\": True, \"hdi_kwargs\": {\"line_dash\": \"dashed\", \"alpha\": 0}},\n {\"ecdf\": True},\n {\"ecdf\": True, \"ecdf_fill\": False, \"plot_unif_kwargs\": {\"line_dash\": \"--\"}},\n {\"ecdf\": True, \"credible_interval\": 0.97, \"fill_kwargs\": {\"color\": \"red\"}},\n ],\n)\ndef test_plot_loo_pit(models, kwargs):\n axes = plot_loo_pit(idata=models.model_1, y=\"y\", backend=\"bokeh\", show=False, **kwargs)\n assert axes\n\n\ndef test_plot_loo_pit_incompatible_args(models):\n \"\"\"Test error when both ecdf and use_hdi are True.\"\"\"\n with 
pytest.raises(ValueError, match=\"incompatible\"):\n plot_loo_pit(\n idata=models.model_1, y=\"y\", ecdf=True, use_hdi=True, backend=\"bokeh\", show=False\n )\n\n\[email protected](\n \"args\",\n [\n {\"y\": \"str\"},\n {\"y\": \"DataArray\", \"y_hat\": \"str\"},\n {\"y\": \"ndarray\", \"y_hat\": \"str\"},\n {\"y\": \"ndarray\", \"y_hat\": \"DataArray\"},\n {\"y\": \"ndarray\", \"y_hat\": \"ndarray\"},\n ],\n)\ndef test_plot_loo_pit_label(models, args):\n if args[\"y\"] == \"str\":\n y = \"y\"\n elif args[\"y\"] == \"DataArray\":\n y = models.model_1.observed_data.y\n elif args[\"y\"] == \"ndarray\":\n y = models.model_1.observed_data.y.values\n\n if args.get(\"y_hat\") == \"str\":\n y_hat = \"y\"\n elif args.get(\"y_hat\") == \"DataArray\":\n y_hat = models.model_1.posterior_predictive.y.stack(sample=(\"chain\", \"draw\"))\n elif args.get(\"y_hat\") == \"ndarray\":\n y_hat = models.model_1.posterior_predictive.y.stack(sample=(\"chain\", \"draw\")).values\n else:\n y_hat = None\n\n ax = plot_loo_pit(idata=models.model_1, y=y, y_hat=y_hat, backend=\"bokeh\", show=False)\n assert ax\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"var_names\": [\"theta\"], \"color\": \"r\"},\n {\"rug\": True, \"rug_kwargs\": {\"color\": \"r\"}},\n {\"errorbar\": True, \"rug\": True, \"rug_kind\": \"max_depth\"},\n {\"errorbar\": True, \"coords\": {\"theta_dim_0\": slice(4)}, \"n_points\": 10},\n {\"extra_methods\": True, \"rug\": True},\n {\"extra_methods\": True, \"extra_kwargs\": {\"ls\": \":\"}, \"text_kwargs\": {\"x\": 0, \"ha\": \"left\"}},\n ],\n)\ndef test_plot_mcse(models, kwargs):\n idata = models.model_1\n ax = plot_mcse(idata, backend=\"bokeh\", show=False, **kwargs)\n assert np.all(ax)\n\n\[email protected](\"dim\", [\"chain\", \"draw\"])\ndef test_plot_mcse_bad_coords(models, dim):\n \"\"\"Test error when chain or dim are used as coords to select a data subset.\"\"\"\n idata = models.model_1\n with pytest.raises(ValueError, match=\"invalid coordinates\"):\n plot_mcse(idata, coords={dim: slice(3)}, backend=\"bokeh\", show=False)\n\n\ndef test_plot_mcse_no_sample_stats(models):\n \"\"\"Test error when rug=True but sample_stats group is not present.\"\"\"\n idata = models.model_1\n with pytest.raises(ValueError, match=\"must contain sample_stats\"):\n plot_mcse(idata.posterior, rug=True, backend=\"bokeh\", show=False)\n\n\ndef test_plot_mcse_no_divergences(models):\n \"\"\"Test error when rug=True, but the variable defined by rug_kind is missing.\"\"\"\n idata = deepcopy(models.model_1)\n idata.sample_stats = idata.sample_stats.rename({\"diverging\": \"diverging_missing\"})\n with pytest.raises(ValueError, match=\"not contain diverging\"):\n plot_mcse(idata, rug=True, backend=\"bokeh\", show=False)\n\n\[email protected]\[email protected](\n \"kwargs\",\n [\n {\"var_names\": \"theta\", \"divergences\": True, \"coords\": {\"theta_dim_0\": [0, 1]}},\n {\"divergences\": True, \"var_names\": [\"theta\", \"mu\"]},\n {\"kind\": \"kde\", \"var_names\": [\"theta\"]},\n {\"kind\": \"hexbin\", \"var_names\": [\"theta\"]},\n {\"kind\": \"hexbin\", \"var_names\": [\"theta\"]},\n {\n \"kind\": \"hexbin\",\n \"var_names\": [\"theta\"],\n \"coords\": {\"theta_dim_0\": [0, 1]},\n \"textsize\": 20,\n },\n {\n \"point_estimate\": \"mean\",\n \"reference_values\": {\"mu\": 0, \"tau\": 0},\n \"reference_values_kwargs\": {\"line_color\": \"blue\"},\n },\n ],\n)\ndef test_plot_pair(models, kwargs):\n ax = plot_pair(models.model_1, backend=\"bokeh\", show=False, **kwargs)\n assert np.any(ax)\n\n\[email 
protected](\"kwargs\", [{\"kind\": \"scatter\"}, {\"kind\": \"kde\"}, {\"kind\": \"hexbin\"}])\ndef test_plot_pair_2var(discrete_model, kwargs):\n ax = plot_pair(\n discrete_model, ax=np.atleast_2d(bkp.figure()), backend=\"bokeh\", show=False, **kwargs\n )\n assert ax\n\n\ndef test_plot_pair_bad(models):\n with pytest.raises(ValueError):\n plot_pair(models.model_1, kind=\"bad_kind\", backend=\"bokeh\", show=False)\n with pytest.raises(Exception):\n plot_pair(models.model_1, var_names=[\"mu\"], backend=\"bokeh\", show=False)\n\n\[email protected](\"has_sample_stats\", [True, False])\ndef test_plot_pair_divergences_warning(has_sample_stats):\n data = load_arviz_data(\"centered_eight\")\n if has_sample_stats:\n # sample_stats present, diverging field missing\n data.sample_stats = data.sample_stats.rename({\"diverging\": \"diverging_missing\"})\n else:\n # sample_stats missing\n data = data.posterior # pylint: disable=no-member\n with pytest.warns(UserWarning):\n ax = plot_pair(data, divergences=True, backend=\"bokeh\", show=False)\n assert np.any(ax)\n\n\ndef test_plot_parallel_raises_valueerror(df_trace): # pylint: disable=invalid-name\n with pytest.raises(ValueError):\n plot_parallel(df_trace, backend=\"bokeh\", show=False)\n\n\[email protected](\"norm_method\", [None, \"normal\", \"minmax\", \"rank\"])\ndef test_plot_parallel(models, norm_method):\n assert plot_parallel(\n models.model_1,\n var_names=[\"mu\", \"tau\"],\n norm_method=norm_method,\n backend=\"bokeh\",\n show=False,\n )\n\n\[email protected](\"var_names\", [None, \"mu\", [\"mu\", \"tau\"]])\ndef test_plot_parallel_exception(models, var_names):\n \"\"\"Ensure that correct exception is raised when one variable is passed.\"\"\"\n with pytest.raises(ValueError):\n assert plot_parallel(\n models.model_1, var_names=var_names, norm_method=\"foo\", backend=\"bokeh\", show=False\n )\n\n\[email protected](\"var_names\", (None, \"mu\", [\"mu\", \"tau\"]))\ndef test_plot_violin(models, var_names):\n axes = plot_violin(models.model_1, var_names=var_names, backend=\"bokeh\", show=False)\n assert axes.shape\n\n\ndef test_plot_violin_ax(models):\n ax = bkp.figure()\n axes = plot_violin(models.model_1, var_names=\"mu\", ax=ax, backend=\"bokeh\", show=False)\n assert axes.shape\n\n\ndef test_plot_violin_layout(models):\n axes = plot_violin(\n models.model_1, var_names=[\"mu\", \"tau\"], sharey=False, backend=\"bokeh\", show=False\n )\n assert axes.shape\n\n\ndef test_plot_violin_discrete(discrete_model):\n axes = plot_violin(discrete_model, backend=\"bokeh\", show=False)\n assert axes.shape\n\n\[email protected](\"kind\", [\"kde\", \"cumulative\", \"scatter\"])\[email protected](\"alpha\", [None, 0.2, 1])\ndef test_plot_ppc(models, kind, alpha):\n axes = plot_ppc(\n models.model_1, kind=kind, alpha=alpha, random_seed=3, backend=\"bokeh\", show=False\n )\n assert axes\n\n\[email protected](\"kind\", [\"kde\", \"cumulative\", \"scatter\"])\[email protected](\"jitter\", [None, 0, 0.1, 1, 3])\ndef test_plot_ppc_multichain(kind, jitter):\n data = from_dict(\n posterior_predictive={\n \"x\": np.random.randn(4, 100, 30),\n \"y_hat\": np.random.randn(4, 100, 3, 10),\n },\n observed_data={\"x\": np.random.randn(30), \"y\": np.random.randn(3, 10)},\n )\n axes = plot_ppc(\n data,\n kind=kind,\n data_pairs={\"y\": \"y_hat\"},\n jitter=jitter,\n random_seed=3,\n backend=\"bokeh\",\n show=False,\n )\n assert np.all(axes)\n\n\[email protected](\"kind\", [\"kde\", \"cumulative\", \"scatter\"])\ndef test_plot_ppc_discrete(kind):\n data = from_dict(\n 
observed_data={\"obs\": np.random.randint(1, 100, 15)},\n posterior_predictive={\"obs\": np.random.randint(1, 300, (1, 20, 15))},\n )\n\n axes = plot_ppc(data, kind=kind, backend=\"bokeh\", show=False)\n assert axes\n\n\ndef test_plot_ppc_grid(models):\n axes = plot_ppc(models.model_1, kind=\"scatter\", flatten=[], backend=\"bokeh\", show=False)\n assert len(axes.ravel()) == 8\n axes = plot_ppc(\n models.model_1,\n kind=\"scatter\",\n flatten=[],\n coords={\"obs_dim\": [1, 2, 3]},\n backend=\"bokeh\",\n show=False,\n )\n assert len(axes.ravel()) == 3\n axes = plot_ppc(\n models.model_1,\n kind=\"scatter\",\n flatten=[\"obs_dim\"],\n coords={\"obs_dim\": [1, 2, 3]},\n backend=\"bokeh\",\n show=False,\n )\n assert len(axes.ravel()) == 1\n\n\[email protected](\"kind\", [\"kde\", \"cumulative\", \"scatter\"])\ndef test_plot_ppc_bad(models, kind):\n data = from_dict(posterior={\"mu\": np.random.randn()})\n with pytest.raises(TypeError):\n plot_ppc(data, kind=kind, backend=\"bokeh\", show=False)\n with pytest.raises(TypeError):\n plot_ppc(models.model_1, kind=\"bad_val\", backend=\"bokeh\", show=False)\n with pytest.raises(TypeError):\n plot_ppc(models.model_1, num_pp_samples=\"bad_val\", backend=\"bokeh\", show=False)\n\n\[email protected](\"kind\", [\"kde\", \"cumulative\", \"scatter\"])\ndef test_plot_ppc_ax(models, kind):\n \"\"\"Test ax argument of plot_ppc.\"\"\"\n ax = bkp.figure()\n axes = plot_ppc(models.model_1, kind=kind, ax=ax, backend=\"bokeh\", show=False)\n assert axes[0, 0] is ax\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"var_names\": \"mu\"},\n {\"var_names\": (\"mu\", \"tau\")},\n {\"rope\": (-2, 2)},\n {\"rope\": {\"mu\": [{\"rope\": (-2, 2)}], \"theta\": [{\"school\": \"Choate\", \"rope\": (2, 4)}]}},\n {\"point_estimate\": \"mode\"},\n {\"point_estimate\": \"median\"},\n {\"point_estimate\": None},\n {\"hdi_prob\": \"hide\"},\n {\"ref_val\": 0},\n {\"ref_val\": None},\n {\"ref_val\": {\"mu\": [{\"ref_val\": 1}]}},\n {\"bins\": None, \"kind\": \"hist\"},\n {\n \"ref_val\": {\n \"theta\": [\n # {\"school\": [\"Choate\", \"Deerfield\"], \"ref_val\": -1}, this is not working\n {\"school\": \"Lawrenceville\", \"ref_val\": 3}\n ]\n }\n },\n ],\n)\ndef test_plot_posterior(models, kwargs):\n axes = plot_posterior(models.model_1, backend=\"bokeh\", show=False, **kwargs)\n assert axes.shape\n\n\[email protected](\"kwargs\", [{}, {\"point_estimate\": \"mode\"}, {\"bins\": None, \"kind\": \"hist\"}])\ndef test_plot_posterior_discrete(discrete_model, kwargs):\n axes = plot_posterior(discrete_model, backend=\"bokeh\", show=False, **kwargs)\n assert axes.shape\n\n\ndef test_plot_posterior_bad(models):\n with pytest.raises(ValueError):\n plot_posterior(models.model_1, backend=\"bokeh\", show=False, rope=\"bad_value\")\n with pytest.raises(ValueError):\n plot_posterior(models.model_1, ref_val=\"bad_value\", backend=\"bokeh\", show=False)\n with pytest.raises(ValueError):\n plot_posterior(models.model_1, point_estimate=\"bad_value\", backend=\"bokeh\", show=False)\n\n\[email protected](\"point_estimate\", (\"mode\", \"mean\", \"median\"))\ndef test_plot_posterior_point_estimates(models, point_estimate):\n axes = plot_posterior(\n models.model_1,\n var_names=(\"mu\", \"tau\"),\n point_estimate=point_estimate,\n backend=\"bokeh\",\n show=False,\n )\n assert axes.shape == (1, 2)\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"var_names\": \"mu\"},\n {\"var_names\": (\"mu\", \"tau\"), \"coords\": {\"theta_dim_0\": [0, 1]}},\n {\"var_names\": \"mu\", \"ref_line\": True},\n 
{\"var_names\": \"mu\", \"ref_line\": False},\n {\"var_names\": \"mu\", \"kind\": \"vlines\"},\n ],\n)\ndef test_plot_rank(models, kwargs):\n axes = plot_rank(models.model_1, backend=\"bokeh\", show=False, **kwargs)\n assert axes.shape\n\n\ndef test_plot_dist_comparison_warn(models):\n with pytest.raises(NotImplementedError, match=\"The bokeh backend.+Use matplotlib backend.\"):\n plot_dist_comparison(models.model_1, backend=\"bokeh\")\n\n\[email protected](\n \"kwargs\",\n [\n {},\n {\"reference\": \"analytical\"},\n {\"kind\": \"p_value\"},\n {\"kind\": \"t_stat\", \"t_stat\": \"std\"},\n {\"kind\": \"t_stat\", \"t_stat\": 0.5, \"bpv\": True},\n ],\n)\ndef test_plot_bpv(models, kwargs):\n axes = plot_bpv(models.model_1, backend=\"bokeh\", show=False, **kwargs)\n assert axes.shape\n"
] | [
[
"numpy.random.beta",
"numpy.arange",
"numpy.all",
"numpy.random.poisson",
"numpy.random.normal",
"numpy.random.randn",
"numpy.any",
"numpy.random.uniform",
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Yi-Zoey/adversarial-robustness-toolbox | [
"d351b3d33c266b436abbf13b9279f266a3dd3062"
] | [
"art/defences/preprocessor/mp3_compression.py"
] | [
"# MIT License\n#\n# Copyright (C) The Adversarial Robustness Toolbox (ART) Authors 2020\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit\n# persons to whom the Software is furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE\n# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,\n# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"\nThis module implements the MP3 compression defence `Mp3Compression`.\n\n| Paper link: https://arxiv.org/abs/1801.01944\n\n| Please keep in mind the limitations of defences. For details on how to evaluate classifier security in general,\n see https://arxiv.org/abs/1902.06705.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nfrom io import BytesIO\nfrom typing import Optional, Tuple\n\nimport numpy as np\nfrom tqdm.auto import tqdm\n\nfrom art.defences.preprocessor.preprocessor import Preprocessor\n\nlogger = logging.getLogger(__name__)\n\n\nclass Mp3Compression(Preprocessor):\n \"\"\"\n Implement the MP3 compression defense approach.\n \"\"\"\n\n params = [\"channels_first\", \"sample_rate\", \"verbose\"]\n\n def __init__(\n self,\n sample_rate: int,\n channels_first: bool = False,\n apply_fit: bool = False,\n apply_predict: bool = True,\n verbose: bool = False,\n ) -> None:\n \"\"\"\n Create an instance of MP3 compression.\n\n :param sample_rate: Specifies the sampling rate of sample.\n :param channels_first: Set channels first or last.\n :param apply_fit: True if applied during fitting/training.\n :param apply_predict: True if applied during predicting.\n :param verbose: Show progress bars.\n \"\"\"\n super().__init__(is_fitted=True, apply_fit=apply_fit, apply_predict=apply_predict)\n self.channels_first = channels_first\n self.sample_rate = sample_rate\n self.verbose = verbose\n self._check_params()\n\n def __call__(self, x: np.ndarray, y: Optional[np.ndarray] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]:\n \"\"\"\n Apply MP3 compression to sample `x`.\n\n :param x: Sample to compress with shape `(batch_size, length, channel)` or an array of sample arrays with shape\n (length,) or (length, channel). `x` values are recommended to be of type `np.int16`.\n :param y: Labels of the sample `x`. This function does not affect them in any way.\n :return: Compressed sample.\n \"\"\"\n\n def wav_to_mp3(x, sample_rate):\n \"\"\"\n Apply MP3 compression to audio input of shape (samples, channel).\n \"\"\"\n from pydub import AudioSegment\n from scipy.io.wavfile import write\n\n normalized = bool(x.min() >= -1.0 and x.max() <= 1.0)\n if x.dtype != np.int16 and not normalized:\n # input is not of type np.int16 and seems to be unnormalized. 
Therefore casting to np.int16.\n x = x.astype(np.int16)\n elif x.dtype != np.int16 and normalized:\n # x is not of type np.int16 and seems to be normalized. Therefore undoing normalization and\n # casting to np.int16.\n x = (x * 2 ** 15).astype(np.int16)\n\n tmp_wav, tmp_mp3 = BytesIO(), BytesIO()\n write(tmp_wav, sample_rate, x)\n AudioSegment.from_wav(tmp_wav).export(tmp_mp3)\n audio_segment = AudioSegment.from_mp3(tmp_mp3)\n tmp_wav.close()\n tmp_mp3.close()\n x_mp3 = np.array(audio_segment.get_array_of_samples()).reshape((-1, audio_segment.channels))\n return x_mp3\n\n if x.dtype != np.object and x.ndim != 3:\n raise ValueError(\"Mp3 compression can only be applied to temporal data across at least one channel.\")\n\n if x.dtype != np.object and self.channels_first:\n x = np.swapaxes(x, 1, 2)\n\n # apply mp3 compression per audio item\n x_mp3 = x.copy()\n for i, x_i in enumerate(tqdm(x, desc=\"MP3 compression\", disable=not self.verbose)):\n x_i_ndim_0 = x_i.ndim\n if x.dtype == np.object:\n if x_i.ndim == 1:\n x_i = np.expand_dims(x_i, axis=1)\n\n if x_i_ndim_0 == 2 and self.channels_first:\n x_i = np.swapaxes(x_i, 0, 1)\n\n x_i = wav_to_mp3(x_i, self.sample_rate)\n\n if x.dtype == np.object:\n if x_i_ndim_0 == 2 and self.channels_first:\n x_i = np.swapaxes(x_i, 0, 1)\n\n if x_i_ndim_0 == 1:\n x_i = np.squeeze(x_i)\n\n x_mp3[i] = x_i\n\n if x.dtype != np.object and self.channels_first:\n x_mp3 = np.swapaxes(x_mp3, 1, 2)\n\n return x_mp3, y\n\n def _check_params(self) -> None:\n if not (isinstance(self.sample_rate, (int, np.int)) and self.sample_rate > 0):\n raise ValueError(\"Sample rate be must a positive integer.\")\n\n if not isinstance(self.verbose, bool):\n raise ValueError(\"The argument `verbose` has to be of type bool.\")\n"
] | [
[
"numpy.swapaxes",
"numpy.expand_dims",
"scipy.io.wavfile.write",
"numpy.squeeze"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
carlosjimenezmwb/voxceleb_trainer | [
"a5a9118003f3789f583c394ff0971e678d6ff3be"
] | [
"models/tdnn.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n@author: cvqluu\nrepo: https://github.com/cvqluu/TDNN\n\"\"\"\n\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass TDNN(nn.Module):\n \n def __init__(self, input_dim=23, output_dim=512, context_size=5, stride=1, dilation=1, batch_norm=False, dropout_p=0.2):\n '''\n TDNN as defined by https://www.danielpovey.com/files/2015_interspeech_multisplice.pdf\n Affine transformation not applied globally to all frames but smaller windows with local context\n batch_norm: True to include batch normalisation after the non linearity\n \n Context size and dilation determine the frames selected\n (although context size is not really defined in the traditional sense)\n For example:\n context size 5 and dilation 1 is equivalent to [-2,-1,0,1,2]\n context size 3 and dilation 2 is equivalent to [-2, 0, 2]\n context size 1 and dilation 1 is equivalent to [0]\n '''\n super(TDNN, self).__init__()\n self.context_size = context_size\n self.stride = stride\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.dilation = dilation\n self.dropout_p = dropout_p\n self.batch_norm = batch_norm\n \n self.kernel = nn.Linear(input_dim*context_size, output_dim)\n self.nonlinearity = nn.ReLU()\n if self.batch_norm:\n self.bn = nn.BatchNorm1d(output_dim)\n if self.dropout_p:\n self.drop = nn.Dropout(p=self.dropout_p)\n \n def forward(self, x):\n '''\n input: size (batch, seq_len, input_features)\n outpu: size (batch, new_seq_len, output_features)\n '''\n \n _, _, d = x.shape\n assert (d == self.input_dim), 'Input dimension was wrong. Expected ({}), got ({})'.format(self.input_dim, d)\n x = x.unsqueeze(1)\n\n # Unfold input into smaller temporal contexts\n x = F.unfold(\n x, \n (self.context_size, self.input_dim), \n stride=(1,self.input_dim), \n dilation=(self.dilation,1)\n )\n\n # N, output_dim*context_size, new_t = x.shape\n x = x.transpose(1,2)\n x = self.kernel(x.float())\n x = self.nonlinearity(x)\n \n if self.dropout_p:\n x = self.drop(x)\n\n if self.batch_norm:\n x = x.transpose(1,2)\n x = self.bn(x)\n x = x.transpose(1,2)\n\n return x\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.Linear",
"torch.nn.functional.unfold",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
vemel/pandas | [
"1508491df35039ba2ca8b5c8ceecff28464c3bfe"
] | [
"pandas/tests/test_frame.py"
] | [
"# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\n# pylint: disable-msg=W0612,E1101\nfrom copy import deepcopy\nfrom datetime import datetime, timedelta, time\nimport sys\nimport operator\nimport re\nimport csv\nimport nose\nimport functools\nimport itertools\nfrom itertools import product\nfrom distutils.version import LooseVersion\n\nfrom pandas.compat import(\n map, zip, range, long, lrange, lmap, lzip,\n OrderedDict, cPickle as pickle, u, StringIO\n)\nfrom pandas import compat\n\nfrom numpy import random, nan\nfrom numpy.random import randn\nimport numpy as np\nimport numpy.ma as ma\nfrom numpy.testing import assert_array_equal\nimport numpy.ma.mrecords as mrecords\n\nimport pandas.core.nanops as nanops\nimport pandas.core.common as com\nimport pandas.core.format as fmt\nimport pandas.core.datetools as datetools\nfrom pandas import (DataFrame, Index, Series, notnull, isnull,\n MultiIndex, DatetimeIndex, Timestamp, date_range, read_csv)\nimport pandas as pd\nfrom pandas.parser import CParserError\nfrom pandas.util.misc import is_little_endian\n\nfrom pandas.util.testing import (assert_almost_equal,\n assert_series_equal,\n assert_frame_equal,\n assertRaisesRegexp,\n assertRaises,\n makeCustomDataframe as mkdf,\n ensure_clean)\nfrom pandas.core.indexing import IndexingError\nfrom pandas.core.common import PandasError\n\nimport pandas.util.testing as tm\nimport pandas.lib as lib\n\nfrom numpy.testing.decorators import slow\n\n#---------------------------------------------------------------------\n# DataFrame test cases\n\nJOIN_TYPES = ['inner', 'outer', 'left', 'right']\nMIXED_FLOAT_DTYPES = ['float16','float32','float64']\nMIXED_INT_DTYPES = ['uint8','uint16','uint32','uint64','int8','int16',\n 'int32','int64']\n\ndef _check_mixed_float(df, dtype = None):\n\n # float16 are most likely to be upcasted to float32\n dtypes = dict(A = 'float32', B = 'float32', C = 'float16', D = 'float64')\n if isinstance(dtype, compat.string_types):\n dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])\n elif isinstance(dtype, dict):\n dtypes.update(dtype)\n if dtypes.get('A'):\n assert(df.dtypes['A'] == dtypes['A'])\n if dtypes.get('B'):\n assert(df.dtypes['B'] == dtypes['B'])\n if dtypes.get('C'):\n assert(df.dtypes['C'] == dtypes['C'])\n if dtypes.get('D'):\n assert(df.dtypes['D'] == dtypes['D'])\n\n\ndef _check_mixed_int(df, dtype = None):\n dtypes = dict(A = 'int32', B = 'uint64', C = 'uint8', D = 'int64')\n if isinstance(dtype, compat.string_types):\n dtypes = dict([ (k,dtype) for k, v in dtypes.items() ])\n elif isinstance(dtype, dict):\n dtypes.update(dtype)\n if dtypes.get('A'):\n assert(df.dtypes['A'] == dtypes['A'])\n if dtypes.get('B'):\n assert(df.dtypes['B'] == dtypes['B'])\n if dtypes.get('C'):\n assert(df.dtypes['C'] == dtypes['C'])\n if dtypes.get('D'):\n assert(df.dtypes['D'] == dtypes['D'])\n\n\nclass CheckIndexing(object):\n\n _multiprocess_can_split_ = True\n\n def test_getitem(self):\n # slicing\n sl = self.frame[:20]\n self.assertEqual(20, len(sl.index))\n\n # column access\n\n for _, series in compat.iteritems(sl):\n self.assertEqual(20, len(series.index))\n self.assertTrue(tm.equalContents(series.index, sl.index))\n\n for key, _ in compat.iteritems(self.frame._series):\n self.assertIsNotNone(self.frame[key])\n\n self.assertNotIn('random', self.frame)\n with assertRaisesRegexp(KeyError, 'random'):\n self.frame['random']\n\n df = self.frame.copy()\n df['$10'] = randn(len(df))\n ad = randn(len(df))\n df['@awesome_domain'] = ad\n self.assertRaises(KeyError, 
df.__getitem__, 'df[\"$10\"]')\n res = df['@awesome_domain']\n assert_array_equal(ad, res.values)\n\n def test_getitem_dupe_cols(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])\n try:\n df[['baf']]\n except KeyError:\n pass\n else:\n self.fail(\"Dataframe failed to raise KeyError\")\n\n def test_get(self):\n b = self.frame.get('B')\n assert_series_equal(b, self.frame['B'])\n\n self.assertIsNone(self.frame.get('foo'))\n assert_series_equal(self.frame.get('foo', self.frame['B']),\n self.frame['B'])\n # None\n # GH 5652\n for df in [DataFrame(), DataFrame(columns=list('AB')), DataFrame(columns=list('AB'),index=range(3)) ]:\n result = df.get(None)\n self.assertIsNone(result)\n\n def test_getitem_iterator(self):\n idx = iter(['A', 'B', 'C'])\n result = self.frame.ix[:, idx]\n expected = self.frame.ix[:, ['A', 'B', 'C']]\n assert_frame_equal(result, expected)\n\n def test_getitem_list(self):\n self.frame.columns.name = 'foo'\n\n result = self.frame[['B', 'A']]\n result2 = self.frame[Index(['B', 'A'])]\n\n expected = self.frame.ix[:, ['B', 'A']]\n expected.columns.name = 'foo'\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n self.assertEqual(result.columns.name, 'foo')\n\n with assertRaisesRegexp(KeyError, 'not in index'):\n self.frame[['B', 'A', 'food']]\n with assertRaisesRegexp(KeyError, 'not in index'):\n self.frame[Index(['B', 'A', 'foo'])]\n\n # tuples\n df = DataFrame(randn(8, 3),\n columns=Index([('foo', 'bar'), ('baz', 'qux'),\n ('peek', 'aboo')], name=['sth', 'sth2']))\n\n result = df[[('foo', 'bar'), ('baz', 'qux')]]\n expected = df.ix[:, :2]\n assert_frame_equal(result, expected)\n self.assertEqual(result.columns.names, ['sth', 'sth2'])\n\n def test_setitem_list(self):\n\n self.frame['E'] = 'foo'\n data = self.frame[['A', 'B']]\n self.frame[['B', 'A']] = data\n\n assert_series_equal(self.frame['B'], data['A'])\n assert_series_equal(self.frame['A'], data['B'])\n\n with assertRaisesRegexp(ValueError, 'Columns must be same length as key'):\n data[['A']] = self.frame[['A', 'B']]\n with assertRaisesRegexp(ValueError, 'Length of values does not match '\n 'length of index'):\n data['A'] = range(len(data.index) - 1)\n\n df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)\n df.ix[1, ['tt1', 'tt2']] = [1, 2]\n\n result = df.ix[1, ['tt1', 'tt2']]\n expected = Series([1, 2], df.columns, dtype=np.int_)\n assert_series_equal(result, expected)\n\n df['tt1'] = df['tt2'] = '0'\n df.ix[1, ['tt1', 'tt2']] = ['1', '2']\n result = df.ix[1, ['tt1', 'tt2']]\n expected = Series(['1', '2'], df.columns)\n assert_series_equal(result, expected)\n\n def test_setitem_list_not_dataframe(self):\n data = np.random.randn(len(self.frame), 2)\n self.frame[['A', 'B']] = data\n assert_almost_equal(self.frame[['A', 'B']].values, data)\n\n def test_setitem_list_of_tuples(self):\n tuples = lzip(self.frame['A'], self.frame['B'])\n self.frame['tuples'] = tuples\n\n result = self.frame['tuples']\n expected = Series(tuples, index=self.frame.index)\n assert_series_equal(result, expected)\n\n def test_getitem_boolean(self):\n # boolean indexing\n d = self.tsframe.index[10]\n indexer = self.tsframe.index > d\n indexer_obj = indexer.astype(object)\n\n subindex = self.tsframe.index[indexer]\n subframe = self.tsframe[indexer]\n\n self.assert_numpy_array_equal(subindex, subframe.index)\n with assertRaisesRegexp(ValueError, 'Item wrong length'):\n self.tsframe[indexer[:-1]]\n\n subframe_obj = self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n 
with tm.assertRaisesRegexp(ValueError, 'boolean values only'):\n self.tsframe[self.tsframe]\n\n # test that Series work\n indexer_obj = Series(indexer_obj, self.tsframe.index)\n\n subframe_obj = self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n # test that Series indexers reindex\n import warnings\n warnings.filterwarnings(action='ignore', category=UserWarning)\n\n indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])\n\n subframe_obj = self.tsframe[indexer_obj]\n assert_frame_equal(subframe_obj, subframe)\n\n warnings.filterwarnings(action='default', category=UserWarning)\n\n # test df[df > 0]\n for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n\n data = df._get_numeric_data()\n bif = df[df > 0]\n bifw = DataFrame(dict([ (c,np.where(data[c] > 0, data[c], np.nan)) for c in data.columns ]),\n index=data.index, columns=data.columns)\n\n # add back other columns to compare\n for c in df.columns:\n if c not in bifw:\n bifw[c] = df[c]\n bifw = bifw.reindex(columns = df.columns)\n\n assert_frame_equal(bif, bifw, check_dtype=False)\n for c in df.columns:\n if bif[c].dtype != bifw[c].dtype:\n self.assertEqual(bif[c].dtype, df[c].dtype)\n\n def test_getitem_boolean_casting(self):\n\n # don't upcast if we don't need to\n df = self.tsframe.copy()\n df['E'] = 1\n df['E'] = df['E'].astype('int32')\n df['E1'] = df['E'].copy()\n df['F'] = 1\n df['F'] = df['F'].astype('int64')\n df['F1'] = df['F'].copy()\n\n casted = df[df>0]\n result = casted.get_dtype_counts()\n expected = Series({'float64': 4, 'int32' : 2, 'int64' : 2})\n assert_series_equal(result, expected)\n\n # int block splitting\n df.ix[1:3,['E1','F1']] = 0\n casted = df[df>0]\n result = casted.get_dtype_counts()\n expected = Series({'float64': 6, 'int32' : 1, 'int64' : 1})\n assert_series_equal(result, expected)\n\n # where dtype conversions\n # GH 3733\n df = DataFrame(data = np.random.randn(100, 50))\n df = df.where(df > 0) # create nans\n bools = df > 0\n mask = isnull(df)\n expected = bools.astype(float).mask(mask)\n result = bools.mask(mask)\n assert_frame_equal(result,expected)\n\n def test_getitem_boolean_list(self):\n df = DataFrame(np.arange(12).reshape(3, 4))\n\n def _checkit(lst):\n result = df[lst]\n expected = df.ix[df.index[lst]]\n assert_frame_equal(result, expected)\n\n _checkit([True, False, True])\n _checkit([True, True, True])\n _checkit([False, False, False])\n\n def test_getitem_boolean_iadd(self):\n arr = randn(5, 5)\n\n df = DataFrame(arr.copy(), columns = ['A','B','C','D','E'])\n\n df[df < 0] += 1\n arr[arr < 0] += 1\n\n assert_almost_equal(df.values, arr)\n\n def test_boolean_index_empty_corner(self):\n # #2096\n blah = DataFrame(np.empty([0, 1]), columns=['A'],\n index=DatetimeIndex([]))\n\n # both of these should succeed trivially\n k = np.array([], bool)\n\n blah[k]\n blah[k] = 0\n\n def test_getitem_ix_mixed_integer(self):\n df = DataFrame(np.random.randn(4, 3),\n index=[1, 10, 'C', 'E'], columns=[1, 2, 3])\n\n result = df.ix[:-1]\n expected = df.ix[df.index[:-1]]\n assert_frame_equal(result, expected)\n\n result = df.ix[[1, 10]]\n expected = df.ix[Index([1, 10], dtype=object)]\n assert_frame_equal(result, expected)\n\n def test_getitem_setitem_ix_negative_integers(self):\n result = self.frame.ix[:, -1]\n assert_series_equal(result, self.frame['D'])\n\n result = self.frame.ix[:, [-1]]\n assert_frame_equal(result, self.frame[['D']])\n\n result = self.frame.ix[:, [-1, -2]]\n assert_frame_equal(result, self.frame[['D', 'C']])\n\n self.frame.ix[:, [-1]] = 0\n 
self.assertTrue((self.frame['D'] == 0).all())\n\n df = DataFrame(np.random.randn(8, 4))\n self.assertTrue(isnull(df.ix[:, [-1]].values).all())\n\n # #1942\n a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])\n a.ix[-1] = a.ix[-2]\n\n assert_series_equal(a.ix[-1], a.ix[-2])\n\n def test_getattr(self):\n tm.assert_series_equal(self.frame.A, self.frame['A'])\n self.assertRaises(AttributeError, getattr, self.frame,\n 'NONEXISTENT_NAME')\n\n def test_setattr_column(self):\n df = DataFrame({'foobar': 1}, index=lrange(10))\n\n df.foobar = 5\n self.assertTrue((df.foobar == 5).all())\n\n def test_setitem(self):\n # not sure what else to do here\n series = self.frame['A'][::2]\n self.frame['col5'] = series\n self.assertIn('col5', self.frame)\n tm.assert_dict_equal(series, self.frame['col5'],\n compare_keys=False)\n\n series = self.frame['A']\n self.frame['col6'] = series\n tm.assert_dict_equal(series, self.frame['col6'],\n compare_keys=False)\n\n with tm.assertRaises(KeyError):\n self.frame[randn(len(self.frame) + 1)] = 1\n\n # set ndarray\n arr = randn(len(self.frame))\n self.frame['col9'] = arr\n self.assertTrue((self.frame['col9'] == arr).all())\n\n self.frame['col7'] = 5\n assert((self.frame['col7'] == 5).all())\n\n self.frame['col0'] = 3.14\n assert((self.frame['col0'] == 3.14).all())\n\n self.frame['col8'] = 'foo'\n assert((self.frame['col8'] == 'foo').all())\n\n smaller = self.frame[:2]\n smaller['col10'] = ['1', '2']\n self.assertEqual(smaller['col10'].dtype, np.object_)\n self.assertTrue((smaller['col10'] == ['1', '2']).all())\n\n # with a dtype\n for dtype in ['int32','int64','float32','float64']:\n self.frame[dtype] = np.array(arr,dtype=dtype)\n self.assertEqual(self.frame[dtype].dtype.name, dtype)\n\n # dtype changing GH4204\n df = DataFrame([[0,0]])\n df.iloc[0] = np.nan\n expected = DataFrame([[np.nan,np.nan]])\n assert_frame_equal(df,expected)\n\n df = DataFrame([[0,0]])\n df.loc[0] = np.nan\n assert_frame_equal(df,expected)\n\n def test_setitem_tuple(self):\n self.frame['A', 'B'] = self.frame['A']\n assert_series_equal(self.frame['A', 'B'], self.frame['A'])\n\n def test_setitem_always_copy(self):\n s = self.frame['A'].copy()\n self.frame['E'] = s\n\n self.frame['E'][5:10] = nan\n self.assertTrue(notnull(s[5:10]).all())\n\n def test_setitem_boolean(self):\n df = self.frame.copy()\n values = self.frame.values\n\n df[df['A'] > 0] = 4\n values[values[:, 0] > 0] = 4\n assert_almost_equal(df.values, values)\n\n # test that column reindexing works\n series = df['A'] == 4\n series = series.reindex(df.index[::-1])\n df[series] = 1\n values[values[:, 0] == 4] = 1\n assert_almost_equal(df.values, values)\n\n df[df > 0] = 5\n values[values > 0] = 5\n assert_almost_equal(df.values, values)\n\n df[df == 5] = 0\n values[values == 5] = 0\n assert_almost_equal(df.values, values)\n\n # a df that needs alignment first\n df[df[:-1] < 0] = 2\n np.putmask(values[:-1], values[:-1] < 0, 2)\n assert_almost_equal(df.values, values)\n\n # indexed with same shape but rows-reversed df\n df[df[::-1] == 2] = 3\n values[values == 2] = 3\n assert_almost_equal(df.values, values)\n\n with assertRaisesRegexp(TypeError, 'Must pass DataFrame with boolean '\n 'values only'):\n df[df * 0] = 2\n\n # index with DataFrame\n mask = df > np.abs(df)\n expected = df.copy()\n df[df > np.abs(df)] = nan\n expected.values[mask.values] = nan\n assert_frame_equal(df, expected)\n\n # set from DataFrame\n expected = df.copy()\n df[df > np.abs(df)] = df * 2\n np.putmask(expected.values, mask.values, df.values * 2)\n 
assert_frame_equal(df, expected)\n\n def test_setitem_cast(self):\n self.frame['D'] = self.frame['D'].astype('i8')\n self.assertEqual(self.frame['D'].dtype, np.int64)\n\n # #669, should not cast?\n # this is now set to int64, which means a replacement of the column to\n # the value dtype (and nothing to do with the existing dtype)\n self.frame['B'] = 0\n self.assertEqual(self.frame['B'].dtype, np.int64)\n\n # cast if pass array of course\n self.frame['B'] = np.arange(len(self.frame))\n self.assertTrue(issubclass(self.frame['B'].dtype.type, np.integer))\n\n self.frame['foo'] = 'bar'\n self.frame['foo'] = 0\n self.assertEqual(self.frame['foo'].dtype, np.int64)\n\n self.frame['foo'] = 'bar'\n self.frame['foo'] = 2.5\n self.assertEqual(self.frame['foo'].dtype, np.float64)\n\n self.frame['something'] = 0\n self.assertEqual(self.frame['something'].dtype, np.int64)\n self.frame['something'] = 2\n self.assertEqual(self.frame['something'].dtype, np.int64)\n self.frame['something'] = 2.5\n self.assertEqual(self.frame['something'].dtype, np.float64)\n\n def test_setitem_boolean_column(self):\n expected = self.frame.copy()\n mask = self.frame['A'] > 0\n\n self.frame.ix[mask, 'B'] = 0\n expected.values[mask.values, 1] = 0\n\n assert_frame_equal(self.frame, expected)\n\n def test_setitem_corner(self):\n # corner case\n df = DataFrame({'B': [1., 2., 3.],\n 'C': ['a', 'b', 'c']},\n index=np.arange(3))\n del df['B']\n df['B'] = [1., 2., 3.]\n self.assertIn('B', df)\n self.assertEqual(len(df.columns), 2)\n\n df['A'] = 'beginning'\n df['E'] = 'foo'\n df['D'] = 'bar'\n df[datetime.now()] = 'date'\n df[datetime.now()] = 5.\n\n # what to do when empty frame with index\n dm = DataFrame(index=self.frame.index)\n dm['A'] = 'foo'\n dm['B'] = 'bar'\n self.assertEqual(len(dm.columns), 2)\n self.assertEqual(dm.values.dtype, np.object_)\n\n # upcast\n dm['C'] = 1\n self.assertEqual(dm['C'].dtype, np.int64)\n\n dm['E'] = 1.\n self.assertEqual(dm['E'].dtype, np.float64)\n\n # set existing column\n dm['A'] = 'bar'\n self.assertEqual('bar', dm['A'][0])\n\n dm = DataFrame(index=np.arange(3))\n dm['A'] = 1\n dm['foo'] = 'bar'\n del dm['foo']\n dm['foo'] = 'bar'\n self.assertEqual(dm['foo'].dtype, np.object_)\n\n dm['coercable'] = ['1', '2', '3']\n self.assertEqual(dm['coercable'].dtype, np.object_)\n\n def test_setitem_corner2(self):\n data = {\"title\": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,\n \"cruft\": np.random.random(20)}\n\n df = DataFrame(data)\n ix = df[df['title'] == 'bar'].index\n\n df.ix[ix, ['title']] = 'foobar'\n df.ix[ix, ['cruft']] = 0\n\n assert(df.ix[1, 'title'] == 'foobar')\n assert(df.ix[1, 'cruft'] == 0)\n\n def test_setitem_ambig(self):\n # difficulties with mixed-type data\n from decimal import Decimal\n\n # created as float type\n dm = DataFrame(index=lrange(3), columns=lrange(3))\n\n coercable_series = Series([Decimal(1) for _ in range(3)],\n index=lrange(3))\n uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))\n\n dm[0] = np.ones(3)\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNone(dm.objects)\n\n dm[1] = coercable_series\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNone(dm.objects)\n\n dm[2] = uncoercable_series\n self.assertEqual(len(dm.columns), 3)\n # self.assertIsNotNone(dm.objects)\n self.assertEqual(dm[2].dtype, np.object_)\n\n def test_setitem_clear_caches(self):\n # GH #304\n df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},\n index=[0, 1, 2, 3])\n df.insert(2, 'z', np.nan)\n\n # cache it\n foo = df['z']\n\n df.ix[2:, 'z'] = 
42\n\n expected = Series([np.nan, np.nan, 42, 42], index=df.index)\n self.assertIsNot(df['z'], foo)\n assert_series_equal(df['z'], expected)\n\n def test_setitem_None(self):\n # GH #766\n self.frame[None] = self.frame['A']\n assert_series_equal(self.frame.iloc[:,-1], self.frame['A'])\n assert_series_equal(self.frame.loc[:,None], self.frame['A'])\n assert_series_equal(self.frame[None], self.frame['A'])\n repr(self.frame)\n\n def test_delitem_corner(self):\n f = self.frame.copy()\n del f['D']\n self.assertEqual(len(f.columns), 3)\n self.assertRaises(KeyError, f.__delitem__, 'D')\n del f['B']\n self.assertEqual(len(f.columns), 2)\n\n def test_getitem_fancy_2d(self):\n f = self.frame\n ix = f.ix\n\n assert_frame_equal(ix[:, ['B', 'A']], f.reindex(columns=['B', 'A']))\n\n subidx = self.frame.index[[5, 4, 1]]\n assert_frame_equal(ix[subidx, ['B', 'A']],\n f.reindex(index=subidx, columns=['B', 'A']))\n\n # slicing rows, etc.\n assert_frame_equal(ix[5:10], f[5:10])\n assert_frame_equal(ix[5:10, :], f[5:10])\n assert_frame_equal(ix[:5, ['A', 'B']],\n f.reindex(index=f.index[:5], columns=['A', 'B']))\n\n # slice rows with labels, inclusive!\n expected = ix[5:11]\n result = ix[f.index[5]:f.index[10]]\n assert_frame_equal(expected, result)\n\n # slice columns\n assert_frame_equal(ix[:, :2], f.reindex(columns=['A', 'B']))\n\n # get view\n exp = f.copy()\n ix[5:10].values[:] = 5\n exp.values[5:10] = 5\n assert_frame_equal(f, exp)\n\n self.assertRaises(ValueError, ix.__getitem__, f > 0.5)\n\n def test_slice_floats(self):\n index = [52195.504153, 52196.303147, 52198.369883]\n df = DataFrame(np.random.rand(3, 2), index=index)\n\n s1 = df.ix[52195.1:52196.5]\n self.assertEqual(len(s1), 2)\n\n s1 = df.ix[52195.1:52196.6]\n self.assertEqual(len(s1), 2)\n\n s1 = df.ix[52195.1:52198.9]\n self.assertEqual(len(s1), 3)\n\n def test_getitem_fancy_slice_integers_step(self):\n df = DataFrame(np.random.randn(10, 5))\n\n # this is OK\n result = df.ix[:8:2]\n df.ix[:8:2] = np.nan\n self.assertTrue(isnull(df.ix[:8:2]).values.all())\n\n def test_getitem_setitem_integer_slice_keyerrors(self):\n df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))\n\n # this is OK\n cp = df.copy()\n cp.ix[4:10] = 0\n self.assertTrue((cp.ix[4:10] == 0).values.all())\n\n # so is this\n cp = df.copy()\n cp.ix[3:11] = 0\n self.assertTrue((cp.ix[3:11] == 0).values.all())\n\n result = df.ix[4:10]\n result2 = df.ix[3:11]\n expected = df.reindex([4, 6, 8, 10])\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n # non-monotonic, raise KeyError\n df2 = df[::-1]\n self.assertRaises(KeyError, df2.ix.__getitem__, slice(3, 11))\n self.assertRaises(KeyError, df2.ix.__setitem__, slice(3, 11), 0)\n\n def test_setitem_fancy_2d(self):\n f = self.frame\n ix = f.ix\n\n # case 1\n frame = self.frame.copy()\n expected = frame.copy()\n frame.ix[:, ['B', 'A']] = 1\n expected['B'] = 1.\n expected['A'] = 1.\n assert_frame_equal(frame, expected)\n\n # case 2\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n\n expected = frame.copy()\n\n subidx = self.frame.index[[5, 4, 1]]\n values = randn(3, 2)\n\n frame.ix[subidx, ['B', 'A']] = values\n frame2.ix[[5, 4, 1], ['B', 'A']] = values\n\n expected['B'].ix[subidx] = values[:, 0]\n expected['A'].ix[subidx] = values[:, 1]\n\n assert_frame_equal(frame, expected)\n assert_frame_equal(frame2, expected)\n\n # case 3: slicing rows, etc.\n frame = self.frame.copy()\n\n expected1 = self.frame.copy()\n frame.ix[5:10] = 1.\n expected1.values[5:10] = 1.\n assert_frame_equal(frame, 
expected1)\n\n expected2 = self.frame.copy()\n arr = randn(5, len(frame.columns))\n frame.ix[5:10] = arr\n expected2.values[5:10] = arr\n assert_frame_equal(frame, expected2)\n\n # case 4\n frame = self.frame.copy()\n frame.ix[5:10, :] = 1.\n assert_frame_equal(frame, expected1)\n frame.ix[5:10, :] = arr\n assert_frame_equal(frame, expected2)\n\n # case 5\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n\n expected = self.frame.copy()\n values = randn(5, 2)\n\n frame.ix[:5, ['A', 'B']] = values\n expected['A'][:5] = values[:, 0]\n expected['B'][:5] = values[:, 1]\n assert_frame_equal(frame, expected)\n\n frame2.ix[:5, [0, 1]] = values\n assert_frame_equal(frame2, expected)\n\n # case 6: slice rows with labels, inclusive!\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[frame.index[5]:frame.index[10]] = 5.\n expected.values[5:11] = 5\n assert_frame_equal(frame, expected)\n\n # case 7: slice columns\n frame = self.frame.copy()\n frame2 = self.frame.copy()\n expected = self.frame.copy()\n\n # slice indices\n frame.ix[:, 1:3] = 4.\n expected.values[:, 1:3] = 4.\n assert_frame_equal(frame, expected)\n\n # slice with labels\n frame.ix[:, 'B':'C'] = 4.\n assert_frame_equal(frame, expected)\n\n # new corner case of boolean slicing / setting\n frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),\n columns=['a', 'b'])\n lst = [100]\n lst.extend([np.nan] * 4)\n expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),\n columns=['a', 'b'])\n frame[frame['a'] == 2] = 100\n assert_frame_equal(frame, expected)\n\n def test_fancy_getitem_slice_mixed(self):\n sliced = self.mixed_frame.ix[:, -3:]\n self.assertEqual(sliced['D'].dtype, np.float64)\n\n # get view with single block\n sliced = self.frame.ix[:, -3:]\n sliced['C'] = 4.\n self.assertTrue((self.frame['C'] == 4).all())\n\n def test_fancy_setitem_int_labels(self):\n # integer index defers to label-based indexing\n\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[[0, 2, 4]] = 5\n exp.values[:3] = 5\n assert_frame_equal(tmp, exp)\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[6] = 5\n exp.values[3] = 5\n assert_frame_equal(tmp, exp)\n\n tmp = df.copy()\n exp = df.copy()\n tmp.ix[:, 2] = 5\n exp.values[:, 2] = 5\n assert_frame_equal(tmp, exp)\n\n def test_fancy_getitem_int_labels(self):\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n result = df.ix[[4, 2, 0], [2, 0]]\n expected = df.reindex(index=[4, 2, 0], columns=[2, 0])\n assert_frame_equal(result, expected)\n\n result = df.ix[[4, 2, 0]]\n expected = df.reindex(index=[4, 2, 0])\n assert_frame_equal(result, expected)\n\n result = df.ix[4]\n expected = df.xs(4)\n assert_series_equal(result, expected)\n\n result = df.ix[:, 3]\n expected = df[3]\n assert_series_equal(result, expected)\n\n def test_fancy_index_int_labels_exceptions(self):\n df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))\n\n # labels that aren't contained\n self.assertRaises(KeyError, df.ix.__setitem__,\n ([0, 1, 2], [2, 3, 4]), 5)\n\n # try to set indices not contained in frame\n self.assertRaises(KeyError,\n self.frame.ix.__setitem__,\n ['foo', 'bar', 'baz'], 1)\n self.assertRaises(KeyError,\n self.frame.ix.__setitem__,\n (slice(None, None), ['E']), 1)\n\n # partial setting now allows this GH2578\n #self.assertRaises(KeyError,\n # self.frame.ix.__setitem__,\n # (slice(None, None), 'E'), 1)\n\n def test_setitem_fancy_mixed_2d(self):\n self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5\n result = 
self.mixed_frame.ix[:5, ['C', 'B', 'A']]\n self.assertTrue((result.values == 5).all())\n\n self.mixed_frame.ix[5] = np.nan\n self.assertTrue(isnull(self.mixed_frame.ix[5]).all())\n\n self.mixed_frame.ix[5] = self.mixed_frame.ix[6]\n assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6])\n\n # #1432\n df = DataFrame({1: [1., 2., 3.],\n 2: [3, 4, 5]})\n self.assertTrue(df._is_mixed_type)\n\n df.ix[1] = [5, 10]\n\n expected = DataFrame({1: [1., 5., 3.],\n 2: [3, 10, 5]})\n\n assert_frame_equal(df, expected)\n\n def test_ix_align(self):\n b = Series(randn(10))\n b.sort()\n df_orig = DataFrame(randn(10, 4))\n df = df_orig.copy()\n\n df.ix[:, 0] = b\n assert_series_equal(df.ix[:, 0].reindex(b.index), b)\n\n dft = df_orig.T\n dft.ix[0, :] = b\n assert_series_equal(dft.ix[0, :].reindex(b.index), b)\n\n df = df_orig.copy()\n df.ix[:5, 0] = b\n s = df.ix[:5, 0]\n assert_series_equal(s, b.reindex(s.index))\n\n dft = df_orig.T\n dft.ix[0, :5] = b\n s = dft.ix[0, :5]\n assert_series_equal(s, b.reindex(s.index))\n\n df = df_orig.copy()\n idx = [0, 1, 3, 5]\n df.ix[idx, 0] = b\n s = df.ix[idx, 0]\n assert_series_equal(s, b.reindex(s.index))\n\n dft = df_orig.T\n dft.ix[0, idx] = b\n s = dft.ix[0, idx]\n assert_series_equal(s, b.reindex(s.index))\n\n def test_ix_frame_align(self):\n b = DataFrame(np.random.randn(3, 4))\n df_orig = DataFrame(randn(10, 4))\n df = df_orig.copy()\n\n df.ix[:3] = b\n out = b.ix[:3]\n assert_frame_equal(out, b)\n\n b.sort_index(inplace=True)\n\n df = df_orig.copy()\n df.ix[[0, 1, 2]] = b\n out = df.ix[[0, 1, 2]].reindex(b.index)\n assert_frame_equal(out, b)\n\n df = df_orig.copy()\n df.ix[:3] = b\n out = df.ix[:3]\n assert_frame_equal(out, b.reindex(out.index))\n\n def test_getitem_setitem_non_ix_labels(self):\n df = tm.makeTimeDataFrame()\n\n start, end = df.index[[5, 10]]\n\n result = df.ix[start:end]\n result2 = df[start:end]\n expected = df[5:11]\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n result = df.copy()\n result.ix[start:end] = 0\n result2 = df.copy()\n result2[start:end] = 0\n expected = df.copy()\n expected[5:11] = 0\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n\n def test_ix_multi_take(self):\n df = DataFrame(np.random.randn(3, 2))\n rs = df.ix[df.index == 0, :]\n xp = df.reindex([0])\n assert_frame_equal(rs, xp)\n\n \"\"\" #1321\n df = DataFrame(np.random.randn(3, 2))\n rs = df.ix[df.index==0, df.columns==1]\n xp = df.reindex([0], [1])\n assert_frame_equal(rs, xp)\n \"\"\"\n\n def test_ix_multi_take_nonint_index(self):\n df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],\n columns=['a', 'b'])\n rs = df.ix[[0], [0]]\n xp = df.reindex(['x'], columns=['a'])\n assert_frame_equal(rs, xp)\n\n def test_ix_multi_take_multiindex(self):\n df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],\n columns=[['a', 'b'], ['1', '2']])\n rs = df.ix[[0], [0]]\n xp = df.reindex(['x'], columns=[('a', '1')])\n assert_frame_equal(rs, xp)\n\n def test_ix_dup(self):\n idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])\n df = DataFrame(np.random.randn(len(idx), 3), idx)\n\n sub = df.ix[:'d']\n assert_frame_equal(sub, df)\n\n sub = df.ix['a':'c']\n assert_frame_equal(sub, df.ix[0:4])\n\n sub = df.ix['b':'d']\n assert_frame_equal(sub, df.ix[2:])\n\n def test_getitem_fancy_1d(self):\n f = self.frame\n ix = f.ix\n\n # return self if no slicing...for now\n self.assertIs(ix[:, :], f)\n\n # low dimensional slice\n xs1 = ix[2, ['C', 'B', 'A']]\n xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])\n 
assert_series_equal(xs1, xs2)\n\n ts1 = ix[5:10, 2]\n ts2 = f[f.columns[2]][5:10]\n assert_series_equal(ts1, ts2)\n\n # positional xs\n xs1 = ix[0]\n xs2 = f.xs(f.index[0])\n assert_series_equal(xs1, xs2)\n\n xs1 = ix[f.index[5]]\n xs2 = f.xs(f.index[5])\n assert_series_equal(xs1, xs2)\n\n # single column\n assert_series_equal(ix[:, 'A'], f['A'])\n\n # return view\n exp = f.copy()\n exp.values[5] = 4\n ix[5][:] = 4\n assert_frame_equal(exp, f)\n\n exp.values[:, 1] = 6\n ix[:, 1][:] = 6\n assert_frame_equal(exp, f)\n\n # slice of mixed-frame\n xs = self.mixed_frame.ix[5]\n exp = self.mixed_frame.xs(self.mixed_frame.index[5])\n assert_series_equal(xs, exp)\n\n def test_setitem_fancy_1d(self):\n\n # case 1: set cross-section for indices\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]\n expected['C'][2] = 1.\n expected['B'][2] = 2.\n expected['A'][2] = 3.\n assert_frame_equal(frame, expected)\n\n frame2 = self.frame.copy()\n frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]\n assert_frame_equal(frame, expected)\n\n # case 2, set a section of a column\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n vals = randn(5)\n expected.values[5:10, 2] = vals\n frame.ix[5:10, 2] = vals\n assert_frame_equal(frame, expected)\n\n frame2 = self.frame.copy()\n frame2.ix[5:10, 'B'] = vals\n assert_frame_equal(frame, expected)\n\n # case 3: full xs\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[4] = 5.\n expected.values[4] = 5.\n assert_frame_equal(frame, expected)\n\n frame.ix[frame.index[4]] = 6.\n expected.values[4] = 6.\n assert_frame_equal(frame, expected)\n\n # single column\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n frame.ix[:, 'A'] = 7.\n expected['A'] = 7.\n assert_frame_equal(frame, expected)\n\n def test_getitem_fancy_scalar(self):\n f = self.frame\n ix = f.ix\n # individual value\n for col in f.columns:\n ts = f[col]\n for idx in f.index[::5]:\n assert_almost_equal(ix[idx, col], ts[idx])\n\n def test_setitem_fancy_scalar(self):\n f = self.frame\n expected = self.frame.copy()\n ix = f.ix\n # individual value\n for j, col in enumerate(f.columns):\n ts = f[col]\n for idx in f.index[::5]:\n i = f.index.get_loc(idx)\n val = randn()\n expected.values[i, j] = val\n ix[idx, col] = val\n assert_frame_equal(f, expected)\n\n def test_getitem_fancy_boolean(self):\n f = self.frame\n ix = f.ix\n\n expected = f.reindex(columns=['B', 'D'])\n result = ix[:, [False, True, False, True]]\n assert_frame_equal(result, expected)\n\n expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])\n result = ix[5:10, [False, True, False, True]]\n assert_frame_equal(result, expected)\n\n boolvec = f.index > f.index[7]\n expected = f.reindex(index=f.index[boolvec])\n result = ix[boolvec]\n assert_frame_equal(result, expected)\n result = ix[boolvec, :]\n assert_frame_equal(result, expected)\n\n result = ix[boolvec, 2:]\n expected = f.reindex(index=f.index[boolvec],\n columns=['C', 'D'])\n assert_frame_equal(result, expected)\n\n def test_setitem_fancy_boolean(self):\n # from 2d, set with booleans\n frame = self.frame.copy()\n expected = self.frame.copy()\n\n mask = frame['A'] > 0\n frame.ix[mask] = 0.\n expected.values[mask.values] = 0.\n assert_frame_equal(frame, expected)\n\n frame = self.frame.copy()\n expected = self.frame.copy()\n frame.ix[mask, ['A', 'B']] = 0.\n expected.values[mask.values, :2] = 0.\n assert_frame_equal(frame, expected)\n\n def test_getitem_fancy_ints(self):\n result = self.frame.ix[[1, 4, 7]]\n 
expected = self.frame.ix[self.frame.index[[1, 4, 7]]]\n assert_frame_equal(result, expected)\n\n result = self.frame.ix[:, [2, 0, 1]]\n expected = self.frame.ix[:, self.frame.columns[[2, 0, 1]]]\n assert_frame_equal(result, expected)\n\n def test_getitem_setitem_fancy_exceptions(self):\n ix = self.frame.ix\n with assertRaisesRegexp(IndexingError, 'Too many indexers'):\n ix[:, :, :]\n with assertRaisesRegexp(IndexingError, 'only tuples of length <= 2 '\n 'supported'):\n ix[:, :, :] = 1\n\n def test_getitem_setitem_boolean_misaligned(self):\n # boolean index misaligned labels\n mask = self.frame['A'][::-1] > 1\n\n result = self.frame.ix[mask]\n expected = self.frame.ix[mask[::-1]]\n assert_frame_equal(result, expected)\n\n cp = self.frame.copy()\n expected = self.frame.copy()\n cp.ix[mask] = 0\n expected.ix[mask] = 0\n assert_frame_equal(cp, expected)\n\n def test_getitem_setitem_boolean_multi(self):\n df = DataFrame(np.random.randn(3, 2))\n\n # get\n k1 = np.array([True, False, True])\n k2 = np.array([False, True])\n result = df.ix[k1, k2]\n expected = df.ix[[0, 2], [1]]\n assert_frame_equal(result, expected)\n\n expected = df.copy()\n df.ix[np.array([True, False, True]),\n np.array([False, True])] = 5\n expected.ix[[0, 2], [1]] = 5\n assert_frame_equal(df, expected)\n\n def test_getitem_setitem_float_labels(self):\n index = Index([1.5, 2, 3, 4, 5])\n df = DataFrame(np.random.randn(5, 5), index=index)\n\n result = df.ix[1.5:4]\n expected = df.reindex([1.5, 2, 3, 4])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.ix[4:5]\n expected = df.reindex([4, 5])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 2)\n\n # loc_float changes this to work properly\n result = df.ix[1:2]\n expected = df.iloc[0:2]\n assert_frame_equal(result, expected)\n\n df.ix[1:2] = 0\n result = df[1:2]\n self.assertTrue((result==0).all().all())\n\n # #2727\n index = Index([1.0, 2.5, 3.5, 4.5, 5.0])\n df = DataFrame(np.random.randn(5, 5), index=index)\n\n # positional slicing only via iloc!\n with tm.assert_produces_warning(FutureWarning):\n result = df.iloc[1.0:5]\n\n expected = df.reindex([2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.iloc[4:5]\n expected = df.reindex([5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 1)\n\n # GH 4892, float indexers in iloc are deprecated\n import warnings\n warnings.filterwarnings(action='error', category=FutureWarning)\n\n cp = df.copy()\n def f():\n cp.iloc[1.0:5] = 0\n self.assertRaises(FutureWarning, f)\n def f():\n result = cp.iloc[1.0:5] == 0\n self.assertRaises(FutureWarning, f)\n self.assertTrue(result.values.all())\n self.assertTrue((cp.iloc[0:1] == df.iloc[0:1]).values.all())\n\n warnings.filterwarnings(action='ignore', category=FutureWarning)\n\n cp = df.copy()\n cp.iloc[4:5] = 0\n self.assertTrue((cp.iloc[4:5] == 0).values.all())\n self.assertTrue((cp.iloc[0:4] == df.iloc[0:4]).values.all())\n\n # float slicing\n result = df.ix[1.0:5]\n expected = df\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 5)\n\n result = df.ix[1.1:5]\n expected = df.reindex([2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 4)\n\n result = df.ix[4.51:5]\n expected = df.reindex([5.0])\n assert_frame_equal(result, expected)\n self.assertEqual(len(result), 1)\n\n result = df.ix[1.0:5.0]\n expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])\n assert_frame_equal(result, expected)\n 
self.assertEqual(len(result), 5)\n\n cp = df.copy()\n cp.ix[1.0:5.0] = 0\n result = cp.ix[1.0:5.0]\n self.assertTrue((result == 0).values.all())\n\n def test_setitem_single_column_mixed(self):\n df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['foo', 'bar', 'baz'])\n df['str'] = 'qux'\n df.ix[::2, 'str'] = nan\n expected = [nan, 'qux', nan, 'qux', nan]\n assert_almost_equal(df['str'].values, expected)\n\n def test_setitem_single_column_mixed_datetime(self):\n df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['foo', 'bar', 'baz'])\n\n df['timestamp'] = Timestamp('20010102')\n\n # check our dtypes\n result = df.get_dtype_counts()\n expected = Series({'float64': 3, 'datetime64[ns]': 1})\n assert_series_equal(result, expected)\n\n # set an allowable datetime64 type\n from pandas import tslib\n df.ix['b', 'timestamp'] = tslib.iNaT\n self.assertTrue(com.isnull(df.ix['b', 'timestamp']))\n\n # allow this syntax\n df.ix['c', 'timestamp'] = nan\n self.assertTrue(com.isnull(df.ix['c', 'timestamp']))\n\n # allow this syntax\n df.ix['d', :] = nan\n self.assertTrue(com.isnull(df.ix['c', :]).all() == False)\n\n # as of GH 3216 this will now work!\n # try to set with a list like item\n #self.assertRaises(\n # Exception, df.ix.__setitem__, ('d', 'timestamp'), [nan])\n\n def test_setitem_frame(self):\n piece = self.frame.ix[:2, ['A', 'B']]\n self.frame.ix[-2:, ['A', 'B']] = piece.values\n assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n # GH 3216\n\n # already aligned\n f = self.mixed_frame.copy()\n piece = DataFrame([[ 1, 2], [3, 4]], index=f.index[0:2],columns=['A', 'B'])\n key = (slice(None,2), ['A', 'B'])\n f.ix[key] = piece\n assert_almost_equal(f.ix[0:2, ['A', 'B']].values,\n piece.values)\n\n # rows unaligned\n f = self.mixed_frame.copy()\n piece = DataFrame([[ 1, 2 ], [3, 4], [5, 6], [7, 8]], index=list(f.index[0:2]) + ['foo','bar'],columns=['A', 'B'])\n key = (slice(None,2), ['A', 'B'])\n f.ix[key] = piece\n assert_almost_equal(f.ix[0:2:, ['A', 'B']].values,\n piece.values[0:2])\n\n # key is unaligned with values\n f = self.mixed_frame.copy()\n piece = f.ix[:2, ['A']]\n key = (slice(-2, None), ['A', 'B'])\n f.ix[key] = piece\n piece['B'] = np.nan\n assert_almost_equal(f.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n # ndarray\n f = self.mixed_frame.copy()\n piece = self.mixed_frame.ix[:2, ['A', 'B']]\n key = (slice(-2, None), ['A', 'B'])\n f.ix[key] = piece.values\n assert_almost_equal(f.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n\n # needs upcasting\n df = DataFrame([[1,2,'foo'],[3,4,'bar']],columns=['A','B','C'])\n df2 = df.copy()\n df2.ix[:,['A','B']] = df.ix[:,['A','B']]+0.5\n expected = df.reindex(columns=['A','B'])\n expected += 0.5\n expected['C'] = df['C']\n assert_frame_equal(df2, expected)\n\n def test_setitem_frame_align(self):\n piece = self.frame.ix[:2, ['A', 'B']]\n piece.index = self.frame.index[-2:]\n piece.columns = ['A', 'B']\n self.frame.ix[-2:, ['A', 'B']] = piece\n assert_almost_equal(self.frame.ix[-2:, ['A', 'B']].values,\n piece.values)\n\n def test_setitem_fancy_exceptions(self):\n pass\n\n def test_getitem_boolean_missing(self):\n pass\n\n def test_setitem_boolean_missing(self):\n pass\n\n def test_getitem_setitem_ix_duplicates(self):\n # #1201\n df = DataFrame(np.random.randn(5, 3),\n index=['foo', 'foo', 'bar', 'baz', 'bar'])\n\n result = df.ix['foo']\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.ix['bar']\n expected = df.ix[[2, 4]]\n assert_frame_equal(result, 
expected)\n\n result = df.ix['baz']\n expected = df.ix[3]\n assert_series_equal(result, expected)\n\n def test_getitem_ix_boolean_duplicates_multiple(self):\n # #1201\n df = DataFrame(np.random.randn(5, 3),\n index=['foo', 'foo', 'bar', 'baz', 'bar'])\n\n result = df.ix[['bar']]\n exp = df.ix[[2, 4]]\n assert_frame_equal(result, exp)\n\n result = df.ix[df[1] > 0]\n exp = df[df[1] > 0]\n assert_frame_equal(result, exp)\n\n result = df.ix[df[0] > 0]\n exp = df[df[0] > 0]\n assert_frame_equal(result, exp)\n\n def test_getitem_setitem_ix_bool_keyerror(self):\n # #2199\n df = DataFrame({'a': [1, 2, 3]})\n\n self.assertRaises(KeyError, df.ix.__getitem__, False)\n self.assertRaises(KeyError, df.ix.__getitem__, True)\n\n self.assertRaises(KeyError, df.ix.__setitem__, False, 0)\n self.assertRaises(KeyError, df.ix.__setitem__, True, 0)\n\n def test_getitem_list_duplicates(self):\n # #1943\n df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))\n df.columns.name = 'foo'\n\n result = df[['B', 'C']]\n self.assertEqual(result.columns.name, 'foo')\n\n expected = df.ix[:, 2:]\n assert_frame_equal(result, expected)\n\n def test_get_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n result = self.frame.get_value(idx, col)\n expected = self.frame[col][idx]\n assert_almost_equal(result, expected)\n\n def test_iteritems(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])\n for k, v in compat.iteritems(df):\n self.assertEqual(type(v), Series)\n\n def test_lookup(self):\n def alt(df, rows, cols):\n result = []\n for r, c in zip(rows, cols):\n result.append(df.get_value(r, c))\n return result\n\n def testit(df):\n rows = list(df.index) * len(df.columns)\n cols = list(df.columns) * len(df.index)\n result = df.lookup(rows, cols)\n expected = alt(df, rows, cols)\n assert_almost_equal(result, expected)\n\n testit(self.mixed_frame)\n testit(self.frame)\n\n df = DataFrame({'label': ['a', 'b', 'a', 'c'],\n 'mask_a': [True, True, False, True],\n 'mask_b': [True, False, False, False],\n 'mask_c': [False, True, False, True]})\n df['mask'] = df.lookup(df.index, 'mask_' + df['label'])\n exp_mask = alt(df, df.index, 'mask_' + df['label'])\n assert_almost_equal(df['mask'], exp_mask)\n self.assertEqual(df['mask'].dtype, np.bool_)\n\n with tm.assertRaises(KeyError):\n self.frame.lookup(['xyz'], ['A'])\n\n with tm.assertRaises(KeyError):\n self.frame.lookup([self.frame.index[0]], ['xyz'])\n\n with tm.assertRaisesRegexp(ValueError, 'same size'):\n self.frame.lookup(['a', 'b', 'c'], ['a'])\n\n def test_set_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n self.frame.set_value(idx, col, 1)\n assert_almost_equal(self.frame[col][idx], 1)\n\n def test_set_value_resize(self):\n\n res = self.frame.set_value('foobar', 'B', 0)\n self.assertIs(res, self.frame)\n self.assertEqual(res.index[-1], 'foobar')\n self.assertEqual(res.get_value('foobar', 'B'), 0)\n\n self.frame.loc['foobar','qux'] = 0\n self.assertEqual(self.frame.get_value('foobar', 'qux'), 0)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', 'sam')\n self.assertEqual(res3['baz'].dtype, np.object_)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', True)\n self.assertEqual(res3['baz'].dtype, np.object_)\n\n res = self.frame.copy()\n res3 = res.set_value('foobar', 'baz', 5)\n self.assertTrue(com.is_float_dtype(res3['baz']))\n self.assertTrue(isnull(res3['baz'].drop(['foobar'])).all())\n self.assertRaises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')\n\n def 
test_set_value_with_index_dtype_change(self):\n df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))\n\n # this is actually ambiguous as the 2 is interpreted as a positional\n # so column is not created\n df = df_orig.copy()\n df.set_value('C', 2, 1.0)\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])\n\n df = df_orig.copy()\n df.loc['C', 2] = 1.0\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n #self.assertEqual(list(df.columns), list(df_orig.columns) + [2])\n\n # create both new\n df = df_orig.copy()\n df.set_value('C', 'D', 1.0)\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])\n\n df = df_orig.copy()\n df.loc['C', 'D'] = 1.0\n self.assertEqual(list(df.index), list(df_orig.index) + ['C'])\n self.assertEqual(list(df.columns), list(df_orig.columns) + ['D'])\n\n def test_get_set_value_no_partial_indexing(self):\n # partial w/ MultiIndex raise exception\n index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])\n df = DataFrame(index=index, columns=lrange(4))\n self.assertRaises(KeyError, df.get_value, 0, 1)\n # self.assertRaises(KeyError, df.set_value, 0, 1, 0)\n\n def test_single_element_ix_dont_upcast(self):\n self.frame['E'] = 1\n self.assertTrue(issubclass(self.frame['E'].dtype.type,\n (int, np.integer)))\n\n result = self.frame.ix[self.frame.index[5], 'E']\n self.assertTrue(com.is_integer(result))\n\n def test_irow(self):\n df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))\n\n result = df.irow(1)\n exp = df.ix[2]\n assert_series_equal(result, exp)\n\n result = df.irow(2)\n exp = df.ix[4]\n assert_series_equal(result, exp)\n\n # slice\n result = df.irow(slice(4, 8))\n expected = df.ix[8:14]\n assert_frame_equal(result, expected)\n\n # verify slice is view\n result[2] = 0.\n exp_col = df[2].copy()\n exp_col[4:8] = 0.\n assert_series_equal(df[2], exp_col)\n\n # list of integers\n result = df.irow([1, 2, 4, 6])\n expected = df.reindex(df.index[[1, 2, 4, 6]])\n assert_frame_equal(result, expected)\n\n def test_icol(self):\n df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))\n\n result = df.icol(1)\n exp = df.ix[:, 2]\n assert_series_equal(result, exp)\n\n result = df.icol(2)\n exp = df.ix[:, 4]\n assert_series_equal(result, exp)\n\n # slice\n result = df.icol(slice(4, 8))\n expected = df.ix[:, 8:14]\n assert_frame_equal(result, expected)\n\n # verify slice is view\n result[8] = 0.\n self.assertTrue((df[8] == 0).all())\n\n # list of integers\n result = df.icol([1, 2, 4, 6])\n expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])\n assert_frame_equal(result, expected)\n\n def test_irow_icol_duplicates(self):\n df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),\n index=list('aab'))\n\n result = df.irow(0)\n result2 = df.ix[0]\n tm.assert_isinstance(result, Series)\n assert_almost_equal(result.values, df.values[0])\n assert_series_equal(result, result2)\n\n result = df.T.icol(0)\n result2 = df.T.ix[:, 0]\n tm.assert_isinstance(result, Series)\n assert_almost_equal(result.values, df.values[0])\n assert_series_equal(result, result2)\n\n # multiindex\n df = DataFrame(np.random.randn(3, 3), columns=[['i', 'i', 'j'],\n ['A', 'A', 'B']],\n index=[['i', 'i', 'j'], ['X', 'X', 'Y']])\n rs = df.irow(0)\n xp = df.ix[0]\n assert_series_equal(rs, xp)\n\n rs = df.icol(0)\n xp = df.T.ix[0]\n assert_series_equal(rs, xp)\n\n rs = df.icol([0])\n xp = df.ix[:, [0]]\n 
assert_frame_equal(rs, xp)\n\n # #2259\n df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])\n result = df.icol([0])\n expected = df.take([0], axis=1)\n assert_frame_equal(result, expected)\n\n def test_icol_sparse_propegate_fill_value(self):\n from pandas.sparse.api import SparseDataFrame\n df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)\n self.assertTrue(len(df['A'].sp_values) == len(df.icol(0).sp_values))\n\n def test_iget_value(self):\n for i, row in enumerate(self.frame.index):\n for j, col in enumerate(self.frame.columns):\n result = self.frame.iget_value(i, j)\n expected = self.frame.get_value(row, col)\n assert_almost_equal(result, expected)\n\n def test_nested_exception(self):\n # Ignore the strange way of triggering the problem\n # (which may get fixed), it's just a way to trigger\n # the issue or reraising an outer exception without\n # a named argument\n df = DataFrame({\"a\": [1, 2, 3], \"b\": [4, 5, 6], \"c\": [7, 8,\n 9]}).set_index([\"a\", \"b\"])\n l = list(df.index)\n l[0] = [\"a\", \"b\"]\n df.index = l\n\n try:\n repr(df)\n except Exception as e:\n self.assertNotEqual(type(e), UnboundLocalError)\n\n def test_reverse_reindex_ffill_raises(self):\n dr = pd.date_range('2013-08-01', periods=6, freq='B')\n data = np.random.randn(6,1)\n df = pd.DataFrame(data, index=dr, columns=list('A'))\n df['A'][3] = np.nan\n df_rev = pd.DataFrame(data, index=dr[::-1], columns=list('A'))\n # Reverse index is not 'monotonic'\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='pad')\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='ffill')\n self.assertRaises(ValueError, df_rev.reindex, df.index, method='bfill')\n\n def test_reversed_reindex_ffill_raises(self):\n dr = pd.date_range('2013-08-01', periods=6, freq='B')\n data = np.random.randn(6,1)\n df = pd.DataFrame(data, index=dr, columns=list('A'))\n df['A'][3] = np.nan\n df = pd.DataFrame(data, index=dr, columns=list('A'))\n # Reversed reindex is not 'monotonic'\n self.assertRaises(ValueError, df.reindex, dr[::-1], method='pad')\n self.assertRaises(ValueError, df.reindex, dr[::-1], method='ffill')\n self.assertRaises(ValueError, df.reindex, dr[::-1], method='bfill')\n\n def test_getitem_ix_float_duplicates(self):\n df = pd.DataFrame(np.random.randn(3, 3),\n index=[0.1, 0.2, 0.2], columns=list('abc'))\n expect = df.iloc[1:]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[1:, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df.index = [1, 0.2, 0.2]\n expect = df.iloc[1:]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[1:, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df = pd.DataFrame(np.random.randn(4, 3),\n index=[1, 0.2, 0.2, 1], columns=list('abc'))\n expect = df.iloc[1:-1]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[1:-1, 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n df.index = [0.1, 0.2, 2, 0.2]\n expect = df.iloc[[1, -1]]\n tm.assert_frame_equal(df.loc[0.2], expect)\n tm.assert_frame_equal(df.ix[0.2], expect)\n\n expect = df.iloc[[1, -1], 0]\n tm.assert_series_equal(df.loc[0.2, 'a'], expect)\n\n\n_seriesd = tm.getSeriesData()\n_tsd = tm.getTimeSeriesData()\n\n_frame = DataFrame(_seriesd)\n_frame2 = DataFrame(_seriesd, columns=['D', 'C', 'B', 'A'])\n_intframe = DataFrame(dict((k, v.astype(int))\n for k, v in compat.iteritems(_seriesd)))\n\n_tsframe = 
DataFrame(_tsd)\n\n_mixed_frame = _frame.copy()\n_mixed_frame['foo'] = 'bar'\n\n\nclass SafeForSparse(object):\n\n _multiprocess_can_split_ = True\n\n def test_copy_index_name_checking(self):\n # don't want to be able to modify the index stored elsewhere after\n # making a copy\n for attr in ('index', 'columns'):\n ind = getattr(self.frame, attr)\n ind.name = None\n cp = self.frame.copy()\n getattr(cp, attr).name = 'foo'\n self.assertIsNone(getattr(self.frame, attr).name)\n\n def test_getitem_pop_assign_name(self):\n s = self.frame['A']\n self.assertEqual(s.name, 'A')\n\n s = self.frame.pop('A')\n self.assertEqual(s.name, 'A')\n\n s = self.frame.ix[:, 'B']\n self.assertEqual(s.name, 'B')\n\n s2 = s.ix[:]\n self.assertEqual(s2.name, 'B')\n\n def test_get_value(self):\n for idx in self.frame.index:\n for col in self.frame.columns:\n result = self.frame.get_value(idx, col)\n expected = self.frame[col][idx]\n assert_almost_equal(result, expected)\n\n def test_join_index(self):\n # left / right\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2)\n self.assertTrue(f.index.equals(joined.index))\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='left')\n self.assertTrue(joined.index.equals(f.index))\n self.assertEqual(len(joined.columns), 4)\n\n joined = f.join(f2, how='right')\n self.assertTrue(joined.index.equals(f2.index))\n self.assertEqual(len(joined.columns), 4)\n\n # inner\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='inner')\n self.assertTrue(joined.index.equals(f.index.intersection(f2.index)))\n self.assertEqual(len(joined.columns), 4)\n\n # outer\n\n f = self.frame.reindex(columns=['A', 'B'])[:10]\n f2 = self.frame.reindex(columns=['C', 'D'])\n\n joined = f.join(f2, how='outer')\n self.assertTrue(tm.equalContents(self.frame.index, joined.index))\n self.assertEqual(len(joined.columns), 4)\n\n assertRaisesRegexp(ValueError, 'join method', f.join, f2, how='foo')\n\n # corner case - overlapping columns\n for how in ('outer', 'left', 'inner'):\n with assertRaisesRegexp(ValueError, 'columns overlap but no suffix'):\n self.frame.join(self.frame, how=how)\n\n def test_join_index_more(self):\n af = self.frame.ix[:, ['A', 'B']]\n bf = self.frame.ix[::2, ['C', 'D']]\n\n expected = af.copy()\n expected['C'] = self.frame['C'][::2]\n expected['D'] = self.frame['D'][::2]\n\n result = af.join(bf)\n assert_frame_equal(result, expected)\n\n result = af.join(bf, how='right')\n assert_frame_equal(result, expected[::2])\n\n result = bf.join(af, how='right')\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n def test_join_index_series(self):\n df = self.frame.copy()\n s = df.pop(self.frame.columns[-1])\n joined = df.join(s)\n\n assert_frame_equal(joined, self.frame, check_names=False) # TODO should this check_names ?\n\n s.name = None\n assertRaisesRegexp(ValueError, 'must have a name', df.join, s)\n\n def test_join_overlap(self):\n df1 = self.frame.ix[:, ['A', 'B', 'C']]\n df2 = self.frame.ix[:, ['B', 'C', 'D']]\n\n joined = df1.join(df2, lsuffix='_df1', rsuffix='_df2')\n df1_suf = df1.ix[:, ['B', 'C']].add_suffix('_df1')\n df2_suf = df2.ix[:, ['B', 'C']].add_suffix('_df2')\n no_overlap = self.frame.ix[:, ['A', 'D']]\n expected = df1_suf.join(df2_suf).join(no_overlap)\n\n # column order not necessarily sorted\n assert_frame_equal(joined, expected.ix[:, joined.columns])\n\n def test_add_prefix_suffix(self):\n with_prefix = 
self.frame.add_prefix('foo#')\n expected = ['foo#%s' % c for c in self.frame.columns]\n self.assert_numpy_array_equal(with_prefix.columns, expected)\n\n with_suffix = self.frame.add_suffix('#foo')\n expected = ['%s#foo' % c for c in self.frame.columns]\n self.assert_numpy_array_equal(with_suffix.columns, expected)\n\n\nclass TestDataFrame(tm.TestCase, CheckIndexing,\n SafeForSparse):\n klass = DataFrame\n\n _multiprocess_can_split_ = True\n\n def setUp(self):\n import warnings\n warnings.filterwarnings(action='ignore', category=FutureWarning)\n\n self.frame = _frame.copy()\n self.frame2 = _frame2.copy()\n\n # force these all to int64 to avoid platform testing issues\n self.intframe = DataFrame(dict([ (c,s) for c,s in compat.iteritems(_intframe) ]), dtype = np.int64)\n self.tsframe = _tsframe.copy()\n self.mixed_frame = _mixed_frame.copy()\n self.mixed_float = DataFrame({ 'A': _frame['A'].copy().astype('float32'),\n 'B': _frame['B'].copy().astype('float32'),\n 'C': _frame['C'].copy().astype('float16'),\n 'D': _frame['D'].copy().astype('float64') })\n self.mixed_float2 = DataFrame({ 'A': _frame2['A'].copy().astype('float32'),\n 'B': _frame2['B'].copy().astype('float32'),\n 'C': _frame2['C'].copy().astype('float16'),\n 'D': _frame2['D'].copy().astype('float64') })\n self.mixed_int = DataFrame({ 'A': _intframe['A'].copy().astype('int32'),\n 'B': np.ones(len(_intframe['B']),dtype='uint64'),\n 'C': _intframe['C'].copy().astype('uint8'),\n 'D': _intframe['D'].copy().astype('int64') })\n self.all_mixed = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'float32' : np.array([1.]*10,dtype='float32'),\n 'int32' : np.array([1]*10,dtype='int32'),\n }, index=np.arange(10))\n\n self.ts1 = tm.makeTimeSeries()\n self.ts2 = tm.makeTimeSeries()[5:]\n self.ts3 = tm.makeTimeSeries()[-5:]\n self.ts4 = tm.makeTimeSeries()[1:-1]\n\n self.ts_dict = {\n 'col1': self.ts1,\n 'col2': self.ts2,\n 'col3': self.ts3,\n 'col4': self.ts4,\n }\n self.empty = DataFrame({})\n\n arr = np.array([[1., 2., 3.],\n [4., 5., 6.],\n [7., 8., 9.]])\n\n self.simple = DataFrame(arr, columns=['one', 'two', 'three'],\n index=['a', 'b', 'c'])\n\n def test_get_axis(self):\n f = self.frame\n self.assertEqual(f._get_axis_number(0), 0)\n self.assertEqual(f._get_axis_number(1), 1)\n self.assertEqual(f._get_axis_number('index'), 0)\n self.assertEqual(f._get_axis_number('rows'), 0)\n self.assertEqual(f._get_axis_number('columns'), 1)\n\n self.assertEqual(f._get_axis_name(0), 'index')\n self.assertEqual(f._get_axis_name(1), 'columns')\n self.assertEqual(f._get_axis_name('index'), 'index')\n self.assertEqual(f._get_axis_name('rows'), 'index')\n self.assertEqual(f._get_axis_name('columns'), 'columns')\n\n self.assertIs(f._get_axis(0), f.index)\n self.assertIs(f._get_axis(1), f.columns)\n\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, 2)\n assertRaisesRegexp(ValueError, 'No axis.*foo', f._get_axis_name, 'foo')\n assertRaisesRegexp(ValueError, 'No axis.*None', f._get_axis_name, None)\n assertRaisesRegexp(ValueError, 'No axis named', f._get_axis_number, None)\n\n def test_set_index(self):\n idx = Index(np.arange(len(self.mixed_frame)))\n\n # cache it\n _ = self.mixed_frame['foo']\n self.mixed_frame.index = idx\n self.assertIs(self.mixed_frame['foo'].index, idx)\n with assertRaisesRegexp(ValueError, 'Length mismatch'):\n self.mixed_frame.index = idx[::2]\n\n def test_set_index_cast(self):\n\n # issue casting an index then set_index\n df = DataFrame({'A' : [1.1,2.2,3.3], 'B' : [5.0,6.1,7.2]},\n index = [2010,2011,2012])\n expected = 
df.ix[2010]\n new_index = df.index.astype(np.int32)\n df.index = new_index\n result = df.ix[2010]\n assert_series_equal(result,expected)\n\n def test_set_index2(self):\n df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],\n 'B': ['one', 'two', 'three', 'one', 'two'],\n 'C': ['a', 'b', 'c', 'd', 'e'],\n 'D': np.random.randn(5),\n 'E': np.random.randn(5)})\n\n # new object, single-column\n result = df.set_index('C')\n result_nodrop = df.set_index('C', drop=False)\n\n index = Index(df['C'], name='C')\n\n expected = df.ix[:, ['A', 'B', 'D', 'E']]\n expected.index = index\n\n expected_nodrop = df.copy()\n expected_nodrop.index = index\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result_nodrop, expected_nodrop)\n self.assertEqual(result.index.name, index.name)\n\n # inplace, single\n df2 = df.copy()\n\n df2.set_index('C', inplace=True)\n\n assert_frame_equal(df2, expected)\n\n df3 = df.copy()\n df3.set_index('C', drop=False, inplace=True)\n\n assert_frame_equal(df3, expected_nodrop)\n\n # create new object, multi-column\n result = df.set_index(['A', 'B'])\n result_nodrop = df.set_index(['A', 'B'], drop=False)\n\n index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B'])\n\n expected = df.ix[:, ['C', 'D', 'E']]\n expected.index = index\n\n expected_nodrop = df.copy()\n expected_nodrop.index = index\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result_nodrop, expected_nodrop)\n self.assertEqual(result.index.names, index.names)\n\n # inplace\n df2 = df.copy()\n df2.set_index(['A', 'B'], inplace=True)\n assert_frame_equal(df2, expected)\n\n df3 = df.copy()\n df3.set_index(['A', 'B'], drop=False, inplace=True)\n assert_frame_equal(df3, expected_nodrop)\n\n # corner case\n with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):\n df.set_index('A', verify_integrity=True)\n\n # append\n result = df.set_index(['A', 'B'], append=True)\n xp = df.reset_index().set_index(['index', 'A', 'B'])\n xp.index.names = [None, 'A', 'B']\n assert_frame_equal(result, xp)\n\n # append to existing multiindex\n rdf = df.set_index(['A'], append=True)\n rdf = rdf.set_index(['B', 'C'], append=True)\n expected = df.set_index(['A', 'B', 'C'], append=True)\n assert_frame_equal(rdf, expected)\n\n # Series\n result = df.set_index(df.C)\n self.assertEqual(result.index.name, 'C')\n\n def test_set_index_nonuniq(self):\n df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'],\n 'B': ['one', 'two', 'three', 'one', 'two'],\n 'C': ['a', 'b', 'c', 'd', 'e'],\n 'D': np.random.randn(5),\n 'E': np.random.randn(5)})\n with assertRaisesRegexp(ValueError, 'Index has duplicate keys'):\n df.set_index('A', verify_integrity=True, inplace=True)\n self.assertIn('A', df)\n\n def test_set_index_bug(self):\n # GH1590\n df = DataFrame({'val': [0, 1, 2], 'key': ['a', 'b', 'c']})\n df2 = df.select(lambda indx: indx >= 1)\n rs = df2.set_index('key')\n xp = DataFrame({'val': [1, 2]},\n Index(['b', 'c'], name='key'))\n assert_frame_equal(rs, xp)\n\n def test_set_index_pass_arrays(self):\n df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'foo', 'foo'],\n 'B': ['one', 'one', 'two', 'three',\n 'two', 'two', 'one', 'three'],\n 'C': np.random.randn(8),\n 'D': np.random.randn(8)})\n\n # multiple columns\n result = df.set_index(['A', df['B'].values], drop=False)\n expected = df.set_index(['A', 'B'], drop=False)\n assert_frame_equal(result, expected, check_names=False) # TODO should set_index check_names ?\n\n def test_set_index_cast_datetimeindex(self):\n df = DataFrame({'A': 
[datetime(2000, 1, 1) + timedelta(i)\n for i in range(1000)],\n 'B': np.random.randn(1000)})\n\n idf = df.set_index('A')\n tm.assert_isinstance(idf.index, DatetimeIndex)\n\n # don't cast a DatetimeIndex WITH a tz, leave as object\n # GH 6032\n i = pd.DatetimeIndex(pd.tseries.tools.to_datetime(['2013-1-1 13:00','2013-1-2 14:00'], errors=\"raise\")).tz_localize('US/Pacific')\n df = DataFrame(np.random.randn(2,1),columns=['A'])\n\n expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'),\n pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')], dtype=\"object\"))\n\n # convert index to series\n result = Series(i)\n assert_series_equal(result, expected)\n\n # assignt to frame\n df['B'] = i\n result = df['B']\n assert_series_equal(result, expected)\n\n # keep the timezone\n result = i.to_series(keep_tz=True)\n assert_series_equal(result.reset_index(drop=True), expected)\n\n # convert to utc\n df['C'] = i.to_series().reset_index(drop=True)\n result = df['C']\n comp = DatetimeIndex(expected.values).copy()\n comp.tz = None\n self.assert_numpy_array_equal(result.values, comp.values)\n\n # list of datetimes with a tz\n df['D'] = i.to_pydatetime()\n result = df['D']\n assert_series_equal(result, expected)\n\n # GH 6785\n # set the index manually\n import pytz\n df = DataFrame([{'ts':datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo':1}])\n expected = df.set_index('ts')\n df.index = df['ts']\n df.pop('ts')\n assert_frame_equal(df, expected)\n\n # GH 3950\n # reset_index with single level\n for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']:\n idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx')\n df = pd.DataFrame({'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx)\n\n expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2),\n datetime(2011, 1, 3), datetime(2011, 1, 4),\n datetime(2011, 1, 5)],\n 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']},\n columns=['idx', 'a', 'b'])\n expected['idx'] = expected['idx'].apply(lambda d: pd.Timestamp(d, tz=tz))\n assert_frame_equal(df.reset_index(), expected)\n\n def test_set_index_multiindexcolumns(self):\n columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)])\n df = DataFrame(np.random.randn(3, 3), columns=columns)\n rs = df.set_index(df.columns[0])\n xp = df.ix[:, 1:]\n xp.index = df.ix[:, 0].values\n xp.index.names = [df.columns[0]]\n assert_frame_equal(rs, xp)\n\n def test_set_index_empty_column(self):\n # #1971\n df = DataFrame([\n dict(a=1, p=0),\n dict(a=2, m=10),\n dict(a=3, m=11, p=20),\n dict(a=4, m=12, p=21)\n ], columns=('a', 'm', 'p', 'x'))\n\n # it works!\n result = df.set_index(['a', 'x'])\n repr(result)\n\n def test_set_columns(self):\n cols = Index(np.arange(len(self.mixed_frame.columns)))\n self.mixed_frame.columns = cols\n with assertRaisesRegexp(ValueError, 'Length mismatch'):\n self.mixed_frame.columns = cols[::2]\n\n def test_keys(self):\n getkeys = self.frame.keys\n self.assertIs(getkeys(), self.frame.columns)\n\n def test_column_contains_typeerror(self):\n try:\n self.frame.columns in self.frame\n except TypeError:\n pass\n\n def test_constructor(self):\n df = DataFrame()\n self.assertEqual(len(df.index), 0)\n\n df = DataFrame(data={})\n self.assertEqual(len(df.index), 0)\n\n def test_constructor_mixed(self):\n index, data = tm.getMixedTypeDict()\n\n indexed_frame = DataFrame(data, index=index)\n unindexed_frame = DataFrame(data)\n\n self.assertEqual(self.mixed_frame['foo'].dtype, np.object_)\n\n def test_constructor_cast_failure(self):\n foo = 
DataFrame({'a': ['a', 'b', 'c']}, dtype=np.float64)\n self.assertEqual(foo['a'].dtype, object)\n\n # GH 3010, constructing with odd arrays\n df = DataFrame(np.ones((4,2)))\n\n # this is ok\n df['foo'] = np.ones((4,2)).tolist()\n\n # this is not ok\n self.assertRaises(ValueError, df.__setitem__, tuple(['test']), np.ones((4,2)))\n\n # this is ok\n df['foo2'] = np.ones((4,2)).tolist()\n\n def test_constructor_dtype_nocast_view(self):\n df = DataFrame([[1, 2]])\n should_be_view = DataFrame(df, dtype=df[0].dtype)\n should_be_view[0][0] = 99\n self.assertEqual(df.values[0, 0], 99)\n\n should_be_view = DataFrame(df.values, dtype=df[0].dtype)\n should_be_view[0][0] = 97\n self.assertEqual(df.values[0, 0], 97)\n\n def test_constructor_dtype_list_data(self):\n df = DataFrame([[1, '2'],\n [None, 'a']], dtype=object)\n self.assertIsNone(df.ix[1, 0])\n self.assertEqual(df.ix[0, 1], '2')\n\n def test_constructor_list_frames(self):\n\n # GH 3243\n result = DataFrame([DataFrame([])])\n self.assertEqual(result.shape, (1,0))\n\n result = DataFrame([DataFrame(dict(A = lrange(5)))])\n tm.assert_isinstance(result.iloc[0,0], DataFrame)\n\n def test_constructor_mixed_dtypes(self):\n\n def _make_mixed_dtypes_df(typ, ad = None):\n\n if typ == 'int':\n dtypes = MIXED_INT_DTYPES\n arrays = [ np.array(np.random.rand(10), dtype = d) for d in dtypes ]\n elif typ == 'float':\n dtypes = MIXED_FLOAT_DTYPES\n arrays = [ np.array(np.random.randint(10, size=10), dtype = d) for d in dtypes ]\n\n zipper = lzip(dtypes,arrays)\n for d,a in zipper:\n assert(a.dtype == d)\n if ad is None:\n ad = dict()\n ad.update(dict([ (d,a) for d,a in zipper ]))\n return DataFrame(ad)\n\n def _check_mixed_dtypes(df, dtypes = None):\n if dtypes is None:\n dtypes = MIXED_FLOAT_DTYPES + MIXED_INT_DTYPES\n for d in dtypes:\n if d in df:\n assert(df.dtypes[d] == d)\n\n # mixed floating and integer coexinst in the same frame\n df = _make_mixed_dtypes_df('float')\n _check_mixed_dtypes(df)\n\n # add lots of types\n df = _make_mixed_dtypes_df('float', dict(A = 1, B = 'foo', C = 'bar'))\n _check_mixed_dtypes(df)\n\n # GH 622\n df = _make_mixed_dtypes_df('int')\n _check_mixed_dtypes(df)\n\n def test_constructor_rec(self):\n rec = self.frame.to_records(index=False)\n\n # Assigning causes segfault in NumPy < 1.5.1\n # rec.dtype.names = list(rec.dtype.names)[::-1]\n\n index = self.frame.index\n\n df = DataFrame(rec)\n self.assert_numpy_array_equal(df.columns, rec.dtype.names)\n\n df2 = DataFrame(rec, index=index)\n self.assert_numpy_array_equal(df2.columns, rec.dtype.names)\n self.assertTrue(df2.index.equals(index))\n\n rng = np.arange(len(rec))[::-1]\n df3 = DataFrame(rec, index=rng, columns=['C', 'B'])\n expected = DataFrame(rec, index=rng).reindex(columns=['C', 'B'])\n assert_frame_equal(df3, expected)\n\n def test_constructor_bool(self):\n df = DataFrame({0: np.ones(10, dtype=bool),\n 1: np.zeros(10, dtype=bool)})\n self.assertEqual(df.values.dtype, np.bool_)\n\n def test_constructor_overflow_int64(self):\n values = np.array([2 ** 64 - i for i in range(1, 10)],\n dtype=np.uint64)\n\n result = DataFrame({'a': values})\n self.assertEqual(result['a'].dtype, object)\n\n # #2355\n data_scores = [(6311132704823138710, 273), (2685045978526272070, 23),\n (8921811264899370420, 45), (long(17019687244989530680), 270),\n (long(9930107427299601010), 273)]\n dtype = [('uid', 'u8'), ('score', 'u8')]\n data = np.zeros((len(data_scores),), dtype=dtype)\n data[:] = data_scores\n df_crawls = DataFrame(data)\n self.assertEqual(df_crawls['uid'].dtype, object)\n\n def 
test_constructor_ordereddict(self):\n import random\n nitems = 100\n nums = lrange(nitems)\n random.shuffle(nums)\n expected = ['A%d' % i for i in nums]\n df = DataFrame(OrderedDict(zip(expected, [[0]] * nitems)))\n self.assertEqual(expected, list(df.columns))\n\n def test_constructor_dict(self):\n frame = DataFrame({'col1': self.ts1,\n 'col2': self.ts2})\n\n tm.assert_dict_equal(self.ts1, frame['col1'], compare_keys=False)\n tm.assert_dict_equal(self.ts2, frame['col2'], compare_keys=False)\n\n frame = DataFrame({'col1': self.ts1,\n 'col2': self.ts2},\n columns=['col2', 'col3', 'col4'])\n\n self.assertEqual(len(frame), len(self.ts2))\n self.assertNotIn('col1', frame)\n self.assertTrue(isnull(frame['col3']).all())\n\n # Corner cases\n self.assertEqual(len(DataFrame({})), 0)\n\n # mix dict and array, wrong size - no spec for which error should raise\n # first\n with tm.assertRaises(ValueError):\n DataFrame({'A': {'a': 'a', 'b': 'b'}, 'B': ['a', 'b', 'c']})\n\n # Length-one dict micro-optimization\n frame = DataFrame({'A': {'1': 1, '2': 2}})\n self.assert_numpy_array_equal(frame.index, ['1', '2'])\n\n # empty dict plus index\n idx = Index([0, 1, 2])\n frame = DataFrame({}, index=idx)\n self.assertIs(frame.index, idx)\n\n # empty with index and columns\n idx = Index([0, 1, 2])\n frame = DataFrame({}, index=idx, columns=idx)\n self.assertIs(frame.index, idx)\n self.assertIs(frame.columns, idx)\n self.assertEqual(len(frame._series), 3)\n\n # with dict of empty list and Series\n frame = DataFrame({'A': [], 'B': []}, columns=['A', 'B'])\n self.assertTrue(frame.index.equals(Index([])))\n\n def test_constructor_multi_index(self):\n # GH 4078\n # construction error with mi and all-nan frame\n tuples = [(2, 3), (3, 3), (3, 3)]\n mi = MultiIndex.from_tuples(tuples)\n df = DataFrame(index=mi,columns=mi)\n self.assertTrue(pd.isnull(df).values.ravel().all())\n\n tuples = [(3, 3), (2, 3), (3, 3)]\n mi = MultiIndex.from_tuples(tuples)\n df = DataFrame(index=mi,columns=mi)\n self.assertTrue(pd.isnull(df).values.ravel().all())\n\n def test_constructor_error_msgs(self):\n msg = \"Mixing dicts with non-Series may lead to ambiguous ordering.\"\n # mix dict and array, wrong size\n with assertRaisesRegexp(ValueError, msg):\n DataFrame({'A': {'a': 'a', 'b': 'b'},\n 'B': ['a', 'b', 'c']})\n\n # wrong size ndarray, GH 3105\n msg = \"Shape of passed values is \\(3, 4\\), indices imply \\(3, 3\\)\"\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(np.arange(12).reshape((4, 3)),\n columns=['foo', 'bar', 'baz'],\n index=date_range('2000-01-01', periods=3))\n\n\n # higher dim raise exception\n with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):\n DataFrame(np.zeros((3, 3, 3)), columns=['A', 'B', 'C'], index=[1])\n\n # wrong size axis labels\n with assertRaisesRegexp(ValueError, \"Shape of passed values is \\(3, 2\\), indices imply \\(3, 1\\)\"):\n DataFrame(np.random.rand(2,3), columns=['A', 'B', 'C'], index=[1])\n\n with assertRaisesRegexp(ValueError, \"Shape of passed values is \\(3, 2\\), indices imply \\(2, 2\\)\"):\n DataFrame(np.random.rand(2,3), columns=['A', 'B'], index=[1, 2])\n\n with assertRaisesRegexp(ValueError, 'If using all scalar values, you must must pass an index'):\n DataFrame({'a': False, 'b': True})\n\n def test_constructor_with_embedded_frames(self):\n\n # embedded data frames\n df1 = DataFrame({'a':[1, 2, 3], 'b':[3, 4, 5]})\n df2 = DataFrame([df1, df1+10])\n\n df2.dtypes\n str(df2)\n\n result = df2.loc[0,0]\n assert_frame_equal(result,df1)\n\n result = df2.loc[1,0]\n 
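# Editor's sketch (hypothetical, not in the original pandas suite): restates what\n        # the surrounding asserts verify -- a DataFrame built from a list of DataFrames\n        # stores each one as a single cell, and .loc returns the embedded frame itself.\n        _inner = DataFrame({'x': [1, 2]})   # '_'-prefixed names are illustrative only\n        _outer = DataFrame([_inner, _inner + 10])\n        tm.assert_isinstance(_outer.loc[0, 0], DataFrame)\n        assert_frame_equal(_outer.loc[1, 0], _inner + 10)\n        # end of editor's sketch\n        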
assert_frame_equal(result,df1+10)\n\n def test_insert_error_msmgs(self):\n\n # GH 7432\n df = DataFrame({'foo':['a', 'b', 'c'], 'bar':[1,2,3], 'baz':['d','e','f']}).set_index('foo')\n s = DataFrame({'foo':['a', 'b', 'c', 'a'], 'fiz':['g','h','i','j']}).set_index('foo')\n msg = 'cannot reindex from a duplicate axis'\n with assertRaisesRegexp(ValueError, msg):\n df['newcol'] = s\n\n # GH 4107, more descriptive error message\n df = DataFrame(np.random.randint(0,2,(4,4)),\n columns=['a', 'b', 'c', 'd'])\n\n msg = 'incompatible index of inserted column with frame index'\n with assertRaisesRegexp(TypeError, msg):\n df['gr'] = df.groupby(['b', 'c']).count()\n\n def test_constructor_subclass_dict(self):\n # Test for passing dict subclass to constructor\n data = {'col1': tm.TestSubDict((x, 10.0 * x) for x in range(10)),\n 'col2': tm.TestSubDict((x, 20.0 * x) for x in range(10))}\n df = DataFrame(data)\n refdf = DataFrame(dict((col, dict(compat.iteritems(val)))\n for col, val in compat.iteritems(data)))\n assert_frame_equal(refdf, df)\n\n data = tm.TestSubDict(compat.iteritems(data))\n df = DataFrame(data)\n assert_frame_equal(refdf, df)\n\n # try with defaultdict\n from collections import defaultdict\n data = {}\n self.frame['B'][:10] = np.nan\n for k, v in compat.iteritems(self.frame):\n dct = defaultdict(dict)\n dct.update(v.to_dict())\n data[k] = dct\n frame = DataFrame(data)\n assert_frame_equal(self.frame.sort_index(), frame)\n\n def test_constructor_dict_block(self):\n expected = [[4., 3., 2., 1.]]\n df = DataFrame({'d': [4.], 'c': [3.], 'b': [2.], 'a': [1.]},\n columns=['d', 'c', 'b', 'a'])\n assert_almost_equal(df.values, expected)\n\n def test_constructor_dict_cast(self):\n # cast float tests\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n frame = DataFrame(test_data, dtype=float)\n self.assertEqual(len(frame), 3)\n self.assertEqual(frame['B'].dtype, np.float64)\n self.assertEqual(frame['A'].dtype, np.float64)\n\n frame = DataFrame(test_data)\n self.assertEqual(len(frame), 3)\n self.assertEqual(frame['B'].dtype, np.object_)\n self.assertEqual(frame['A'].dtype, np.float64)\n\n # can't cast to float\n test_data = {\n 'A': dict(zip(range(20), tm.makeStringIndex(20))),\n 'B': dict(zip(range(15), randn(15)))\n }\n frame = DataFrame(test_data, dtype=float)\n self.assertEqual(len(frame), 20)\n self.assertEqual(frame['A'].dtype, np.object_)\n self.assertEqual(frame['B'].dtype, np.float64)\n\n def test_constructor_dict_dont_upcast(self):\n d = {'Col1': {'Row1': 'A String', 'Row2': np.nan}}\n df = DataFrame(d)\n tm.assert_isinstance(df['Col1']['Row2'], float)\n\n dm = DataFrame([[1, 2], ['a', 'b']], index=[1, 2], columns=[1, 2])\n tm.assert_isinstance(dm[1][1], int)\n\n def test_constructor_dict_of_tuples(self):\n # GH #1491\n data = {'a': (1, 2, 3), 'b': (4, 5, 6)}\n\n result = DataFrame(data)\n expected = DataFrame(dict((k, list(v)) for k, v in compat.iteritems(data)))\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_dict_multiindex(self):\n check = lambda result, expected: tm.assert_frame_equal(\n result, expected, check_dtype=True, check_index_type=True,\n check_column_type=True, check_names=True)\n d = {('a', 'a'): {('i', 'i'): 0, ('i', 'j'): 1, ('j', 'i'): 2},\n ('b', 'a'): {('i', 'i'): 6, ('i', 'j'): 5, ('j', 'i'): 4},\n ('b', 'c'): {('i', 'i'): 7, ('i', 'j'): 8, ('j', 'i'): 9}}\n _d = sorted(d.items())\n df = DataFrame(d)\n expected = DataFrame(\n [x[1] for x in _d],\n index=MultiIndex.from_tuples([x[0] for x in 
_d])).T\n expected.index = MultiIndex.from_tuples(expected.index)\n check(df, expected)\n\n d['z'] = {'y': 123., ('i', 'i'): 111, ('i', 'j'): 111, ('j', 'i'): 111}\n _d.insert(0, ('z', d['z']))\n expected = DataFrame(\n [x[1] for x in _d],\n index=Index([x[0] for x in _d], tupleize_cols=False)).T\n expected.index = Index(expected.index, tupleize_cols=False)\n df = DataFrame(d)\n df = df.reindex(columns=expected.columns, index=expected.index)\n check(df, expected)\n\n def _check_basic_constructor(self, empty):\n \"mat: 2d matrix with shpae (3, 2) to input. empty - makes sized objects\"\n mat = empty((2, 3), dtype=float)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n\n # 1-D input\n frame = DataFrame(empty((3,)), columns=['A'], index=[1, 2, 3])\n self.assertEqual(len(frame.index), 3)\n self.assertEqual(len(frame.columns), 1)\n\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.int64)\n self.assertEqual(frame.values.dtype, np.int64)\n\n # wrong size axis labels\n msg = r'Shape of passed values is \\(3, 2\\), indices imply \\(3, 1\\)'\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(mat, columns=['A', 'B', 'C'], index=[1])\n msg = r'Shape of passed values is \\(3, 2\\), indices imply \\(2, 2\\)'\n with assertRaisesRegexp(ValueError, msg):\n DataFrame(mat, columns=['A', 'B'], index=[1, 2])\n\n # higher dim raise exception\n with assertRaisesRegexp(ValueError, 'Must pass 2-d input'):\n DataFrame(empty((3, 3, 3)), columns=['A', 'B', 'C'],\n index=[1])\n\n # automatic labeling\n frame = DataFrame(mat)\n self.assert_numpy_array_equal(frame.index, lrange(2))\n self.assert_numpy_array_equal(frame.columns, lrange(3))\n\n frame = DataFrame(mat, index=[1, 2])\n self.assert_numpy_array_equal(frame.columns, lrange(3))\n\n frame = DataFrame(mat, columns=['A', 'B', 'C'])\n self.assert_numpy_array_equal(frame.index, lrange(2))\n\n # 0-length axis\n frame = DataFrame(empty((0, 3)))\n self.assertEqual(len(frame.index), 0)\n\n frame = DataFrame(empty((3, 0)))\n self.assertEqual(len(frame.columns), 0)\n\n def test_constructor_ndarray(self):\n mat = np.zeros((2, 3), dtype=float)\n self._check_basic_constructor(np.ones)\n\n frame = DataFrame(['foo', 'bar'], index=[0, 1], columns=['A'])\n self.assertEqual(len(frame), 2)\n\n def test_constructor_maskedarray(self):\n self._check_basic_constructor(ma.masked_all)\n\n # Check non-masked values\n mat = ma.masked_all((2, 3), dtype=float)\n mat[0, 0] = 1.0\n mat[1, 2] = 2.0\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1.0, frame['A'][1])\n self.assertEqual(2.0, frame['C'][2])\n\n # what is this even checking??\n mat = ma.masked_all((2, 3), dtype=float)\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n def test_constructor_maskedarray_nonfloat(self):\n # masked int promoted to float\n mat = ma.masked_all((2, 3), dtype=int)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.float64)\n self.assertEqual(frame.values.dtype, np.float64)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = 1\n mat2[1, 2] = 2\n frame = DataFrame(mat2, 
columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1, frame['A'][1])\n self.assertEqual(2, frame['C'][2])\n\n # masked np.datetime64 stays (use lib.NaT as null)\n mat = ma.masked_all((2, 3), dtype='M8[ns]')\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(isnull(frame).values.all())\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=np.int64)\n self.assertEqual(frame.values.dtype, np.int64)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = 1\n mat2[1, 2] = 2\n frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(1, frame['A'].view('i8')[1])\n self.assertEqual(2, frame['C'].view('i8')[2])\n\n # masked bool promoted to object\n mat = ma.masked_all((2, 3), dtype=bool)\n # 2-D input\n frame = DataFrame(mat, columns=['A', 'B', 'C'], index=[1, 2])\n\n self.assertEqual(len(frame.index), 2)\n self.assertEqual(len(frame.columns), 3)\n self.assertTrue(np.all(~np.asarray(frame == frame)))\n\n # cast type\n frame = DataFrame(mat, columns=['A', 'B', 'C'],\n index=[1, 2], dtype=object)\n self.assertEqual(frame.values.dtype, object)\n\n # Check non-masked values\n mat2 = ma.copy(mat)\n mat2[0, 0] = True\n mat2[1, 2] = False\n frame = DataFrame(mat2, columns=['A', 'B', 'C'], index=[1, 2])\n self.assertEqual(True, frame['A'][1])\n self.assertEqual(False, frame['C'][2])\n\n def test_constructor_mrecarray(self):\n # Ensure mrecarray produces frame identical to dict of masked arrays\n # from GH3479\n\n assert_fr_equal = functools.partial(assert_frame_equal,\n check_index_type=True,\n check_column_type=True,\n check_frame_type=True)\n arrays = [\n ('float', np.array([1.5, 2.0])),\n ('int', np.array([1, 2])),\n ('str', np.array(['abc', 'def'])),\n ]\n for name, arr in arrays[:]:\n arrays.append(('masked1_' + name,\n np.ma.masked_array(arr, mask=[False, True])))\n arrays.append(('masked_all', np.ma.masked_all((2,))))\n arrays.append(('masked_none',\n np.ma.masked_array([1.0, 2.5], mask=False)))\n\n # call assert_frame_equal for all selections of 3 arrays\n for comb in itertools.combinations(arrays, 3):\n names, data = zip(*comb)\n mrecs = mrecords.fromarrays(data, names=names)\n\n # fill the comb\n comb = dict([ (k, v.filled()) if hasattr(v,'filled') else (k, v) for k, v in comb ])\n\n expected = DataFrame(comb,columns=names)\n result = DataFrame(mrecs)\n assert_fr_equal(result,expected)\n\n # specify columns\n expected = DataFrame(comb,columns=names[::-1])\n result = DataFrame(mrecs, columns=names[::-1])\n assert_fr_equal(result,expected)\n\n # specify index\n expected = DataFrame(comb,columns=names,index=[1,2])\n result = DataFrame(mrecs, index=[1,2])\n assert_fr_equal(result,expected)\n\n def test_constructor_corner(self):\n df = DataFrame(index=[])\n self.assertEqual(df.values.shape, (0, 0))\n\n # empty but with specified dtype\n df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=object)\n self.assertEqual(df.values.dtype, np.object_)\n\n # does not error but ends up float\n df = DataFrame(index=lrange(10), columns=['a', 'b'], dtype=int)\n self.assertEqual(df.values.dtype, np.object_)\n\n # #1783 empty dtype object\n df = DataFrame({}, columns=['foo', 'bar'])\n self.assertEqual(df.values.dtype, np.object_)\n\n df = DataFrame({'b': 1}, index=lrange(10), columns=list('abc'),\n dtype=int)\n self.assertEqual(df.values.dtype, np.object_)\n\n\n def 
test_constructor_scalar_inference(self):\n data = {'int': 1, 'bool': True,\n 'float': 3., 'complex': 4j, 'object': 'foo'}\n df = DataFrame(data, index=np.arange(10))\n\n self.assertEqual(df['int'].dtype, np.int64)\n self.assertEqual(df['bool'].dtype, np.bool_)\n self.assertEqual(df['float'].dtype, np.float64)\n self.assertEqual(df['complex'].dtype, np.complex128)\n self.assertEqual(df['object'].dtype, np.object_)\n\n def test_constructor_arrays_and_scalars(self):\n df = DataFrame({'a': randn(10), 'b': True})\n exp = DataFrame({'a': df['a'].values, 'b': [True] * 10})\n\n assert_frame_equal(df, exp)\n with tm.assertRaisesRegexp(ValueError, 'must pass an index'):\n DataFrame({'a': False, 'b': True})\n\n def test_constructor_DataFrame(self):\n df = DataFrame(self.frame)\n assert_frame_equal(df, self.frame)\n\n df_casted = DataFrame(self.frame, dtype=np.int64)\n self.assertEqual(df_casted.values.dtype, np.int64)\n\n def test_constructor_more(self):\n # used to be in test_matrix.py\n arr = randn(10)\n dm = DataFrame(arr, columns=['A'], index=np.arange(10))\n self.assertEqual(dm.values.ndim, 2)\n\n arr = randn(0)\n dm = DataFrame(arr)\n self.assertEqual(dm.values.ndim, 2)\n self.assertEqual(dm.values.ndim, 2)\n\n # no data specified\n dm = DataFrame(columns=['A', 'B'], index=np.arange(10))\n self.assertEqual(dm.values.shape, (10, 2))\n\n dm = DataFrame(columns=['A', 'B'])\n self.assertEqual(dm.values.shape, (0, 2))\n\n dm = DataFrame(index=np.arange(10))\n self.assertEqual(dm.values.shape, (10, 0))\n\n # corner, silly\n # TODO: Fix this Exception to be better...\n with assertRaisesRegexp(PandasError, 'constructor not properly called'):\n DataFrame((1, 2, 3))\n\n # can't cast\n mat = np.array(['foo', 'bar'], dtype=object).reshape(2, 1)\n with assertRaisesRegexp(ValueError, 'cast'):\n DataFrame(mat, index=[0, 1], columns=[0], dtype=float)\n\n dm = DataFrame(DataFrame(self.frame._series))\n tm.assert_frame_equal(dm, self.frame)\n\n # int cast\n dm = DataFrame({'A': np.ones(10, dtype=int),\n 'B': np.ones(10, dtype=np.float64)},\n index=np.arange(10))\n\n self.assertEqual(len(dm.columns), 2)\n self.assertEqual(dm.values.dtype, np.float64)\n\n def test_constructor_empty_list(self):\n df = DataFrame([], index=[])\n expected = DataFrame(index=[])\n assert_frame_equal(df, expected)\n\n def test_constructor_list_of_lists(self):\n # GH #484\n l = [[1, 'a'], [2, 'b']]\n df = DataFrame(data=l, columns=[\"num\", \"str\"])\n self.assertTrue(com.is_integer_dtype(df['num']))\n self.assertEqual(df['str'].dtype, np.object_)\n\n # GH 4851\n # list of 0-dim ndarrays\n expected = DataFrame({ 0: range(10) })\n data = [np.array(x) for x in range(10)]\n result = DataFrame(data)\n assert_frame_equal(result, expected)\n\n def test_constructor_sequence_like(self):\n # GH 3783\n # collections.Squence like\n import collections\n\n class DummyContainer(collections.Sequence):\n def __init__(self, lst):\n self._lst = lst\n def __getitem__(self, n):\n return self._lst.__getitem__(n)\n def __len__(self, n):\n return self._lst.__len__()\n\n l = [DummyContainer([1, 'a']), DummyContainer([2, 'b'])]\n columns = [\"num\", \"str\"]\n result = DataFrame(l, columns=columns)\n expected = DataFrame([[1,'a'],[2,'b']],columns=columns)\n assert_frame_equal(result, expected, check_dtype=False)\n\n # GH 4297\n # support Array\n import array\n result = DataFrame.from_items([('A', array.array('i', range(10)))])\n expected = DataFrame({ 'A' : list(range(10)) })\n assert_frame_equal(result, expected, check_dtype=False)\n\n expected = DataFrame([ 
list(range(10)), list(range(10)) ])\n result = DataFrame([ array.array('i', range(10)), array.array('i',range(10)) ])\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_iterator(self):\n\n expected = DataFrame([ list(range(10)), list(range(10)) ])\n result = DataFrame([ range(10), range(10) ])\n assert_frame_equal(result, expected)\n\n def test_constructor_generator(self):\n #related #2305\n\n gen1 = (i for i in range(10))\n gen2 = (i for i in range(10))\n\n expected = DataFrame([ list(range(10)), list(range(10)) ])\n result = DataFrame([ gen1, gen2 ])\n assert_frame_equal(result, expected)\n\n gen = ([ i, 'a'] for i in range(10))\n result = DataFrame(gen)\n expected = DataFrame({ 0 : range(10), 1 : 'a' })\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_constructor_list_of_dicts(self):\n data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),\n OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),\n OrderedDict([['a', 1.5], ['d', 6]]),\n OrderedDict(),\n OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),\n OrderedDict([['b', 3], ['c', 4], ['d', 6]])]\n\n result = DataFrame(data)\n expected = DataFrame.from_dict(dict(zip(range(len(data)), data)),\n orient='index')\n assert_frame_equal(result, expected.reindex(result.index))\n\n result = DataFrame([{}])\n expected = DataFrame(index=[0])\n assert_frame_equal(result, expected)\n\n def test_constructor_list_of_series(self):\n data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),\n OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]\n sdict = OrderedDict(zip(['x', 'y'], data))\n idx = Index(['a', 'b', 'c'])\n\n # all named\n data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),\n Series([1.5, 3, 6], idx, name='y')]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected)\n\n # some unnamed\n data2 = [Series([1.5, 3, 4], idx, dtype='O', name='x'),\n Series([1.5, 3, 6], idx)]\n result = DataFrame(data2)\n\n sdict = OrderedDict(zip(['x', 'Unnamed 0'], data))\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result.sort_index(), expected)\n\n # none named\n data = [OrderedDict([['a', 1.5], ['b', 3], ['c', 4], ['d', 6]]),\n OrderedDict([['a', 1.5], ['b', 3], ['d', 6]]),\n OrderedDict([['a', 1.5], ['d', 6]]),\n OrderedDict(),\n OrderedDict([['a', 1.5], ['b', 3], ['c', 4]]),\n OrderedDict([['b', 3], ['c', 4], ['d', 6]])]\n data = [Series(d) for d in data]\n\n result = DataFrame(data)\n sdict = OrderedDict(zip(range(len(data)), data))\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected.reindex(result.index))\n\n result2 = DataFrame(data, index=np.arange(6))\n assert_frame_equal(result, result2)\n\n result = DataFrame([Series({})])\n expected = DataFrame(index=[0])\n assert_frame_equal(result, expected)\n\n data = [OrderedDict([['a', 1.5], ['b', 3.0], ['c', 4.0]]),\n OrderedDict([['a', 1.5], ['b', 3.0], ['c', 6.0]])]\n sdict = OrderedDict(zip(range(len(data)), data))\n\n idx = Index(['a', 'b', 'c'])\n data2 = [Series([1.5, 3, 4], idx, dtype='O'),\n Series([1.5, 3, 6], idx)]\n result = DataFrame(data2)\n expected = DataFrame.from_dict(sdict, orient='index')\n assert_frame_equal(result, expected)\n\n def test_constructor_list_of_derived_dicts(self):\n class CustomDict(dict):\n pass\n d = {'a': 1.5, 'b': 3}\n\n data_custom = [CustomDict(d)]\n data = [d]\n\n result_custom = DataFrame(data_custom)\n result = DataFrame(data)\n assert_frame_equal(result, 
result_custom)\n\n def test_constructor_ragged(self):\n data = {'A': randn(10),\n 'B': randn(8)}\n with assertRaisesRegexp(ValueError, 'arrays must all be same length'):\n DataFrame(data)\n\n def test_constructor_scalar(self):\n idx = Index(lrange(3))\n df = DataFrame({\"a\": 0}, index=idx)\n expected = DataFrame({\"a\": [0, 0, 0]}, index=idx)\n assert_frame_equal(df, expected, check_dtype=False)\n\n def test_constructor_Series_copy_bug(self):\n df = DataFrame(self.frame['A'], index=self.frame.index, columns=['A'])\n df.copy()\n\n def test_constructor_mixed_dict_and_Series(self):\n data = {}\n data['A'] = {'foo': 1, 'bar': 2, 'baz': 3}\n data['B'] = Series([4, 3, 2, 1], index=['bar', 'qux', 'baz', 'foo'])\n\n result = DataFrame(data)\n self.assertTrue(result.index.is_monotonic)\n\n # ordering ambiguous, raise exception\n with assertRaisesRegexp(ValueError, 'ambiguous ordering'):\n DataFrame({'A': ['a', 'b'], 'B': {'a': 'a', 'b': 'b'}})\n\n # this is OK though\n result = DataFrame({'A': ['a', 'b'],\n 'B': Series(['a', 'b'], index=['a', 'b'])})\n expected = DataFrame({'A': ['a', 'b'], 'B': ['a', 'b']},\n index=['a', 'b'])\n assert_frame_equal(result, expected)\n\n def test_constructor_tuples(self):\n result = DataFrame({'A': [(1, 2), (3, 4)]})\n expected = DataFrame({'A': Series([(1, 2), (3, 4)])})\n assert_frame_equal(result, expected)\n\n def test_constructor_orient(self):\n data_dict = self.mixed_frame.T._series\n recons = DataFrame.from_dict(data_dict, orient='index')\n expected = self.mixed_frame.sort_index()\n assert_frame_equal(recons, expected)\n\n # dict of sequence\n a = {'hi': [32, 3, 3],\n 'there': [3, 5, 3]}\n rs = DataFrame.from_dict(a, orient='index')\n xp = DataFrame.from_dict(a).T.reindex(list(a.keys()))\n assert_frame_equal(rs, xp)\n\n def test_constructor_Series_named(self):\n a = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')\n df = DataFrame(a)\n self.assertEqual(df.columns[0], 'x')\n self.assertTrue(df.index.equals(a.index))\n\n # ndarray like\n arr = np.random.randn(10)\n s = Series(arr,name='x')\n df = DataFrame(s)\n expected = DataFrame(dict(x = s))\n assert_frame_equal(df,expected)\n\n s = Series(arr,index=range(3,13))\n df = DataFrame(s)\n expected = DataFrame({ 0 : s })\n assert_frame_equal(df,expected)\n\n self.assertRaises(ValueError, DataFrame, s, columns=[1,2])\n\n # #2234\n a = Series([], name='x')\n df = DataFrame(a)\n self.assertEqual(df.columns[0], 'x')\n\n # series with name and w/o\n s1 = Series(arr,name='x')\n df = DataFrame([s1, arr]).T\n expected = DataFrame({ 'x' : s1, 'Unnamed 0' : arr },columns=['x','Unnamed 0'])\n assert_frame_equal(df,expected)\n\n # this is a bit non-intuitive here; the series collapse down to arrays\n df = DataFrame([arr, s1]).T\n expected = DataFrame({ 1 : s1, 0 : arr },columns=[0,1])\n assert_frame_equal(df,expected)\n\n def test_constructor_Series_differently_indexed(self):\n # name\n s1 = Series([1, 2, 3], index=['a', 'b', 'c'], name='x')\n\n # no name\n s2 = Series([1, 2, 3], index=['a', 'b', 'c'])\n\n other_index = Index(['a', 'b'])\n\n df1 = DataFrame(s1, index=other_index)\n exp1 = DataFrame(s1.reindex(other_index))\n self.assertEqual(df1.columns[0], 'x')\n assert_frame_equal(df1, exp1)\n\n df2 = DataFrame(s2, index=other_index)\n exp2 = DataFrame(s2.reindex(other_index))\n self.assertEqual(df2.columns[0], 0)\n self.assertTrue(df2.index.equals(other_index))\n assert_frame_equal(df2, exp2)\n\n def test_constructor_manager_resize(self):\n index = list(self.frame.index[:5])\n columns = list(self.frame.columns[:3])\n\n 
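# Editor's sketch (hypothetical, not in the original pandas suite): the pattern\n        # exercised just below -- handing an existing frame's BlockManager (._data) to the\n        # DataFrame constructor together with a subset of its labels -- behaves here like\n        # a reindex down to those labels.\n        _df = DataFrame({'a': [1., 2., 3.], 'b': [4., 5., 6.]})\n        _small = DataFrame(_df._data, index=[0, 1], columns=['a'])\n        self.assertEqual(_small.shape, (2, 1))\n        # end of editor's sketch\n        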
result = DataFrame(self.frame._data, index=index,\n columns=columns)\n self.assert_numpy_array_equal(result.index, index)\n self.assert_numpy_array_equal(result.columns, columns)\n\n def test_constructor_from_items(self):\n items = [(c, self.frame[c]) for c in self.frame.columns]\n recons = DataFrame.from_items(items)\n assert_frame_equal(recons, self.frame)\n\n # pass some columns\n recons = DataFrame.from_items(items, columns=['C', 'B', 'A'])\n assert_frame_equal(recons, self.frame.ix[:, ['C', 'B', 'A']])\n\n # orient='index'\n\n row_items = [(idx, self.mixed_frame.xs(idx))\n for idx in self.mixed_frame.index]\n\n recons = DataFrame.from_items(row_items,\n columns=self.mixed_frame.columns,\n orient='index')\n assert_frame_equal(recons, self.mixed_frame)\n self.assertEqual(recons['A'].dtype, np.float64)\n\n with tm.assertRaisesRegexp(TypeError,\n \"Must pass columns with orient='index'\"):\n DataFrame.from_items(row_items, orient='index')\n\n # orient='index', but thar be tuples\n arr = lib.list_to_object_array(\n [('bar', 'baz')] * len(self.mixed_frame))\n self.mixed_frame['foo'] = arr\n row_items = [(idx, list(self.mixed_frame.xs(idx)))\n for idx in self.mixed_frame.index]\n recons = DataFrame.from_items(row_items,\n columns=self.mixed_frame.columns,\n orient='index')\n assert_frame_equal(recons, self.mixed_frame)\n tm.assert_isinstance(recons['foo'][0], tuple)\n\n rs = DataFrame.from_items([('A', [1, 2, 3]), ('B', [4, 5, 6])],\n orient='index', columns=['one', 'two', 'three'])\n xp = DataFrame([[1, 2, 3], [4, 5, 6]], index=['A', 'B'],\n columns=['one', 'two', 'three'])\n assert_frame_equal(rs, xp)\n\n def test_constructor_mix_series_nonseries(self):\n df = DataFrame({'A': self.frame['A'],\n 'B': list(self.frame['B'])}, columns=['A', 'B'])\n assert_frame_equal(df, self.frame.ix[:, ['A', 'B']])\n\n with tm.assertRaisesRegexp(ValueError, 'does not match index length'):\n DataFrame({'A': self.frame['A'], 'B': list(self.frame['B'])[:-2]})\n\n def test_constructor_miscast_na_int_dtype(self):\n df = DataFrame([[np.nan, 1], [1, 0]], dtype=np.int64)\n expected = DataFrame([[np.nan, 1], [1, 0]])\n assert_frame_equal(df, expected)\n\n def test_constructor_iterator_failure(self):\n with assertRaisesRegexp(TypeError, 'iterator'):\n df = DataFrame(iter([1, 2, 3]))\n\n def test_constructor_column_duplicates(self):\n # it works! 
#2079\n df = DataFrame([[8, 5]], columns=['a', 'a'])\n edf = DataFrame([[8, 5]])\n edf.columns = ['a', 'a']\n\n assert_frame_equal(df, edf)\n\n idf = DataFrame.from_items(\n [('a', [8]), ('a', [5])], columns=['a', 'a'])\n assert_frame_equal(idf, edf)\n\n self.assertRaises(ValueError, DataFrame.from_items,\n [('a', [8]), ('a', [5]), ('b', [6])],\n columns=['b', 'a', 'a'])\n\n def test_column_dups_operations(self):\n\n def check(result, expected=None):\n if expected is not None:\n assert_frame_equal(result,expected)\n result.dtypes\n str(result)\n\n # assignment\n # GH 3687\n arr = np.random.randn(3, 2)\n idx = lrange(2)\n df = DataFrame(arr, columns=['A', 'A'])\n df.columns = idx\n expected = DataFrame(arr,columns=idx)\n check(df,expected)\n\n idx = date_range('20130101',periods=4,freq='Q-NOV')\n df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['a','a','a','a'])\n df.columns = idx\n expected = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=idx)\n check(df,expected)\n\n # insert\n df = DataFrame([[1,1,1,5],[1,1,2,5],[2,1,3,5]],columns=['foo','bar','foo','hello'])\n df['string'] = 'bah'\n expected = DataFrame([[1,1,1,5,'bah'],[1,1,2,5,'bah'],[2,1,3,5,'bah']],columns=['foo','bar','foo','hello','string'])\n check(df,expected)\n with assertRaisesRegexp(ValueError, 'Length of value'):\n df.insert(0, 'AnotherColumn', range(len(df.index) - 1))\n\n # insert same dtype\n df['foo2'] = 3\n expected = DataFrame([[1,1,1,5,'bah',3],[1,1,2,5,'bah',3],[2,1,3,5,'bah',3]],columns=['foo','bar','foo','hello','string','foo2'])\n check(df,expected)\n\n # set (non-dup)\n df['foo2'] = 4\n expected = DataFrame([[1,1,1,5,'bah',4],[1,1,2,5,'bah',4],[2,1,3,5,'bah',4]],columns=['foo','bar','foo','hello','string','foo2'])\n check(df,expected)\n df['foo2'] = 3\n\n # delete (non dup)\n del df['bar']\n expected = DataFrame([[1,1,5,'bah',3],[1,2,5,'bah',3],[2,3,5,'bah',3]],columns=['foo','foo','hello','string','foo2'])\n check(df,expected)\n\n # try to delete again (its not consolidated)\n del df['hello']\n expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])\n check(df,expected)\n\n # consolidate\n df = df.consolidate()\n expected = DataFrame([[1,1,'bah',3],[1,2,'bah',3],[2,3,'bah',3]],columns=['foo','foo','string','foo2'])\n check(df,expected)\n\n # insert\n df.insert(2,'new_col',5.)\n expected = DataFrame([[1,1,5.,'bah',3],[1,2,5.,'bah',3],[2,3,5.,'bah',3]],columns=['foo','foo','new_col','string','foo2'])\n check(df,expected)\n\n # insert a dup\n assertRaisesRegexp(ValueError, 'cannot insert', df.insert, 2, 'new_col', 4.)\n df.insert(2,'new_col',4.,allow_duplicates=True)\n expected = DataFrame([[1,1,4.,5.,'bah',3],[1,2,4.,5.,'bah',3],[2,3,4.,5.,'bah',3]],columns=['foo','foo','new_col','new_col','string','foo2'])\n check(df,expected)\n\n # delete (dup)\n del df['foo']\n expected = DataFrame([[4.,5.,'bah',3],[4.,5.,'bah',3],[4.,5.,'bah',3]],columns=['new_col','new_col','string','foo2'])\n assert_frame_equal(df,expected)\n\n # dup across dtypes\n df = DataFrame([[1,1,1.,5],[1,1,2.,5],[2,1,3.,5]],columns=['foo','bar','foo','hello'])\n check(df)\n\n df['foo2'] = 7.\n expected = DataFrame([[1,1,1.,5,7.],[1,1,2.,5,7.],[2,1,3.,5,7.]],columns=['foo','bar','foo','hello','foo2'])\n check(df,expected)\n\n result = df['foo']\n expected = DataFrame([[1,1.],[1,2.],[2,3.]],columns=['foo','foo'])\n check(result,expected)\n\n # multiple replacements\n df['foo'] = 'string'\n expected = 
DataFrame([['string',1,'string',5,7.],['string',1,'string',5,7.],['string',1,'string',5,7.]],columns=['foo','bar','foo','hello','foo2'])\n check(df,expected)\n\n del df['foo']\n expected = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','hello','foo2'])\n check(df,expected)\n\n # values\n df = DataFrame([[1,2.5],[3,4.5]], index=[1,2], columns=['x','x'])\n result = df.values\n expected = np.array([[1,2.5],[3,4.5]])\n self.assertTrue((result == expected).all().all())\n\n # rename, GH 4403\n df4 = DataFrame({'TClose': [22.02],\n 'RT': [0.0454],\n 'TExg': [0.0422]},\n index=MultiIndex.from_tuples([(600809, 20130331)], names=['STK_ID', 'RPT_Date']))\n\n df5 = DataFrame({'STK_ID': [600809] * 3,\n 'RPT_Date': [20120930,20121231,20130331],\n 'STK_Name': [u('饡驦'), u('饡驦'), u('饡驦')],\n 'TClose': [38.05, 41.66, 30.01]},\n index=MultiIndex.from_tuples([(600809, 20120930), (600809, 20121231),(600809,20130331)], names=['STK_ID', 'RPT_Date']))\n\n k = pd.merge(df4,df5,how='inner',left_index=True,right_index=True)\n result = k.rename(columns={'TClose_x':'TClose', 'TClose_y':'QT_Close'})\n str(result)\n result.dtypes\n\n expected = DataFrame([[0.0454, 22.02, 0.0422, 20130331, 600809, u('饡驦'), 30.01 ]],\n columns=['RT','TClose','TExg','RPT_Date','STK_ID','STK_Name','QT_Close']).set_index(['STK_ID','RPT_Date'],drop=False)\n assert_frame_equal(result,expected)\n\n # reindex is invalid!\n df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])\n self.assertRaises(ValueError, df.reindex, columns=['bar'])\n self.assertRaises(ValueError, df.reindex, columns=['bar','foo'])\n\n # drop\n df = DataFrame([[1,5,7.],[1,5,7.],[1,5,7.]],columns=['bar','a','a'])\n result = df.drop(['a'],axis=1)\n expected = DataFrame([[1],[1],[1]],columns=['bar'])\n check(result,expected)\n result = df.drop('a',axis=1)\n check(result,expected)\n\n # describe\n df = DataFrame([[1,1,1],[2,2,2],[3,3,3]],columns=['bar','a','a'],dtype='float64')\n result = df.describe()\n s = df.iloc[:,0].describe()\n expected = pd.concat([ s, s, s],keys=df.columns,axis=1)\n check(result,expected)\n\n # check column dups with index equal and not equal to df's index\n df = DataFrame(np.random.randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'A'])\n for index in [df.index, pd.Index(list('edcba'))]:\n this_df = df.copy()\n expected_ser = pd.Series(index.values, index=this_df.index)\n expected_df = DataFrame.from_items([('A', expected_ser),\n ('B', this_df['B']),\n ('A', expected_ser)])\n this_df['A'] = index\n check(this_df, expected_df)\n\n # operations\n for op in ['__add__','__mul__','__sub__','__truediv__']:\n df = DataFrame(dict(A = np.arange(10), B = np.random.rand(10)))\n expected = getattr(df,op)(df)\n expected.columns = ['A','A']\n df.columns = ['A','A']\n result = getattr(df,op)(df)\n check(result,expected)\n\n # multiple assignments that change dtypes\n # the location indexer is a slice\n # GH 6120\n df = DataFrame(np.random.randn(5,2), columns=['that', 'that'])\n expected = DataFrame(1.0, index=range(5), columns=['that', 'that'])\n\n df['that'] = 1.0\n check(df, expected)\n\n df = DataFrame(np.random.rand(5,2), columns=['that', 'that'])\n expected = DataFrame(1, index=range(5), columns=['that', 'that'])\n\n df['that'] = 1\n check(df, expected)\n\n def test_column_dups2(self):\n\n # drop buggy GH 6240\n df = DataFrame({'A' : np.random.randn(5),\n 'B' : np.random.randn(5),\n 'C' : np.random.randn(5),\n 'D' : ['a','b','c','d','e'] })\n\n expected = df.take([0,1,1], axis=1)\n df2 = df.take([2,0,1,2,1], axis=1)\n result 
= df2.drop('C',axis=1)\n assert_frame_equal(result, expected)\n\n # dropna\n df = DataFrame({'A' : np.random.randn(5),\n 'B' : np.random.randn(5),\n 'C' : np.random.randn(5),\n 'D' : ['a','b','c','d','e'] })\n df.iloc[2,[0,1,2]] = np.nan\n df.iloc[0,0] = np.nan\n df.iloc[1,1] = np.nan\n df.iloc[:,3] = np.nan\n expected = df.dropna(subset=['A','B','C'],how='all')\n expected.columns = ['A','A','B','C']\n\n df.columns = ['A','A','B','C']\n\n result = df.dropna(subset=['A','C'],how='all')\n assert_frame_equal(result, expected)\n\n def test_column_dups_indexing(self):\n def check(result, expected=None):\n if expected is not None:\n assert_frame_equal(result,expected)\n result.dtypes\n str(result)\n\n # boolean indexing\n # GH 4879\n dups = ['A', 'A', 'C', 'D']\n df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')\n expected = df[df.C > 6]\n expected.columns = dups\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n result = df[df.C > 6]\n check(result,expected)\n\n # where\n df = DataFrame(np.arange(12).reshape(3,4), columns=['A', 'B', 'C', 'D'],dtype='float64')\n expected = df[df > 6]\n expected.columns = dups\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n result = df[df > 6]\n check(result,expected)\n\n # boolean with the duplicate raises\n df = DataFrame(np.arange(12).reshape(3,4), columns=dups,dtype='float64')\n self.assertRaises(ValueError, lambda : df[df.A > 6])\n\n # dup aligining operations should work\n # GH 5185\n df1 = DataFrame([1, 2, 3, 4, 5], index=[1, 2, 1, 2, 3])\n df2 = DataFrame([1, 2, 3], index=[1, 2, 3])\n expected = DataFrame([0,2,0,2,2],index=[1,1,2,2,3])\n result = df1.sub(df2)\n assert_frame_equal(result,expected)\n\n # equality\n df1 = DataFrame([[1,2],[2,np.nan],[3,4],[4,4]],columns=['A','B'])\n df2 = DataFrame([[0,1],[2,4],[2,np.nan],[4,5]],columns=['A','A'])\n\n # not-comparing like-labelled\n self.assertRaises(ValueError, lambda : df1 == df2)\n\n df1r = df1.reindex_like(df2)\n result = df1r == df2\n expected = DataFrame([[False,True],[True,False],[False,False],[True,False]],columns=['A','A'])\n assert_frame_equal(result,expected)\n\n # mixed column selection\n # GH 5639\n dfbool = DataFrame({'one' : Series([True, True, False], index=['a', 'b', 'c']),\n 'two' : Series([False, False, True, False], index=['a', 'b', 'c', 'd']),\n 'three': Series([False, True, True, True], index=['a', 'b', 'c', 'd'])})\n expected = pd.concat([dfbool['one'],dfbool['three'],dfbool['one']],axis=1)\n result = dfbool[['one', 'three', 'one']]\n check(result,expected)\n\n # multi-axis dups\n # GH 6121\n df = DataFrame(np.arange(25.).reshape(5,5),\n index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'C', 'D', 'E'])\n z = df[['A', 'C', 'A']].copy()\n expected = z.ix[['a', 'c', 'a']]\n\n df = DataFrame(np.arange(25.).reshape(5,5),\n index=['a', 'b', 'c', 'd', 'e'],\n columns=['A', 'B', 'C', 'D', 'E'])\n z = df[['A', 'C', 'A']]\n result = z.ix[['a', 'c', 'a']]\n check(result,expected)\n\n def test_insert_benchmark(self):\n # from the vb_suite/frame_methods/frame_insert_columns\n N = 10\n K = 5\n df = DataFrame(index=lrange(N))\n new_col = np.random.randn(N)\n for i in range(K):\n df[i] = new_col\n expected = DataFrame(np.repeat(new_col,K).reshape(N,K),index=lrange(N))\n assert_frame_equal(df,expected)\n\n def test_constructor_single_value(self):\n\n # expecting single value upcasting here\n df = DataFrame(0., index=[1, 2, 3], columns=['a', 'b', 'c'])\n assert_frame_equal(df, 
DataFrame(np.zeros(df.shape).astype('float64'), df.index,\n df.columns))\n\n df = DataFrame(0, index=[1, 2, 3], columns=['a', 'b', 'c'])\n assert_frame_equal(df, DataFrame(np.zeros(df.shape).astype('int64'), df.index,\n df.columns))\n\n\n df = DataFrame('a', index=[1, 2], columns=['a', 'c'])\n assert_frame_equal(df, DataFrame(np.array([['a', 'a'],\n ['a', 'a']],\n dtype=object),\n index=[1, 2],\n columns=['a', 'c']))\n\n self.assertRaises(com.PandasError, DataFrame, 'a', [1, 2])\n self.assertRaises(com.PandasError, DataFrame, 'a', columns=['a', 'c'])\n with tm.assertRaisesRegexp(TypeError, 'incompatible data and dtype'):\n DataFrame('a', [1, 2], ['a', 'c'], float)\n\n def test_constructor_with_datetimes(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n # single item\n df = DataFrame({'A' : 1, 'B' : 'foo', 'C' : 'bar', 'D' : Timestamp(\"20010101\"), 'E' : datetime(2001,1,2,0,0) },\n index=np.arange(10))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, datetime64name: 2, objectname : 2})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # check with ndarray construction ndim==0 (e.g. we are passing a ndim 0 ndarray with a dtype specified)\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array(1.,dtype=floatname),\n intname : np.array(1,dtype=intname)}, index=np.arange(10))\n result = df.get_dtype_counts()\n expected = { objectname : 1 }\n if intname == 'int64':\n expected['int64'] = 2\n else:\n expected['int64'] = 1\n expected[intname] = 1\n if floatname == 'float64':\n expected['float64'] = 2\n else:\n expected['float64'] = 1\n expected[floatname] = 1\n\n result.sort_index()\n expected = Series(expected)\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # check with ndarray construction ndim>0\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', floatname : np.array([1.]*10,dtype=floatname),\n intname : np.array([1]*10,dtype=intname)}, index=np.arange(10))\n result = df.get_dtype_counts()\n result.sort_index()\n assert_series_equal(result, expected)\n\n # GH 2809\n ind = date_range(start=\"2000-01-01\", freq=\"D\", periods=10)\n datetimes = [ts.to_pydatetime() for ts in ind]\n datetime_s = Series(datetimes)\n self.assertEqual(datetime_s.dtype, 'M8[ns]')\n df = DataFrame({'datetime_s':datetime_s})\n result = df.get_dtype_counts()\n expected = Series({ datetime64name : 1 })\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # GH 2810\n ind = date_range(start=\"2000-01-01\", freq=\"D\", periods=10)\n datetimes = [ts.to_pydatetime() for ts in ind]\n dates = [ts.date() for ts in ind]\n df = DataFrame({'datetimes': datetimes, 'dates':dates})\n result = df.get_dtype_counts()\n expected = Series({ datetime64name : 1, objectname : 1 })\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n # GH 7594\n # don't coerce tz-aware\n import pytz\n tz = pytz.timezone('US/Eastern')\n dt = tz.localize(datetime(2012, 1, 1))\n df = DataFrame({'End Date': dt}, index=[0])\n self.assertEqual(df.iat[0,0],dt)\n assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))\n\n df = DataFrame([{'End Date': dt}])\n self.assertEqual(df.iat[0,0],dt)\n assert_series_equal(df.dtypes,Series({'End Date' : np.dtype('object') }))\n\n def test_constructor_for_list_with_dtypes(self):\n intname = np.dtype(np.int_).name\n floatname = 
np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n # test list of lists/ndarrays\n df = DataFrame([np.arange(5) for x in range(5)])\n result = df.get_dtype_counts()\n expected = Series({'int64' : 5})\n\n df = DataFrame([np.array(np.arange(5),dtype='int32') for x in range(5)])\n result = df.get_dtype_counts()\n expected = Series({'int32' : 5})\n\n # overflow issue? (we always expecte int64 upcasting here)\n df = DataFrame({'a' : [2**31,2**31+1]})\n result = df.get_dtype_counts()\n expected = Series({'int64' : 1 })\n assert_series_equal(result, expected)\n\n # GH #2751 (construction with no index specified), make sure we cast to platform values\n df = DataFrame([1, 2])\n result = df.get_dtype_counts()\n expected = Series({'int64': 1 })\n assert_series_equal(result, expected)\n\n df = DataFrame([1.,2.])\n result = df.get_dtype_counts()\n expected = Series({'float64' : 1 })\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : [1, 2]})\n result = df.get_dtype_counts()\n expected = Series({'int64' : 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : [1., 2.]})\n result = df.get_dtype_counts()\n expected = Series({'float64' : 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : 1 }, index=lrange(3))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1})\n assert_series_equal(result, expected)\n\n df = DataFrame({'a' : 1. }, index=lrange(3))\n result = df.get_dtype_counts()\n expected = Series({'float64': 1 })\n assert_series_equal(result, expected)\n\n # with object list\n df = DataFrame({'a':[1,2,4,7], 'b':[1.2, 2.3, 5.1, 6.3],\n 'c':list('abcd'), 'd':[datetime(2000,1,1) for i in range(4)],\n 'e' : [1.,2,4.,7]})\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, 'float64' : 2, datetime64name: 1, objectname : 1})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n def test_not_hashable(self):\n df = pd.DataFrame([1])\n self.assertRaises(TypeError, hash, df)\n self.assertRaises(TypeError, hash, self.empty)\n\n def test_timedeltas(self):\n\n df = DataFrame(dict(A = Series(date_range('2012-1-1', periods=3, freq='D')),\n B = Series([ timedelta(days=i) for i in range(3) ])))\n result = df.get_dtype_counts()\n expected = Series({'datetime64[ns]': 1, 'timedelta64[ns]' : 1 })\n result.sort()\n expected.sort()\n assert_series_equal(result, expected)\n\n df['C'] = df['A'] + df['B']\n expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1 })\n result = df.get_dtype_counts()\n result.sort()\n expected.sort()\n assert_series_equal(result, expected)\n\n # mixed int types\n df['D'] = 1\n expected = Series({'datetime64[ns]': 2, 'timedelta64[ns]' : 1, 'int64' : 1 })\n result = df.get_dtype_counts()\n result.sort()\n expected.sort()\n assert_series_equal(result, expected)\n\n def test_operators_timedelta64(self):\n\n from datetime import datetime, timedelta\n df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'),\n B = date_range('2012-1-2', periods=3, freq='D'),\n C = Timestamp('20120101')-timedelta(minutes=5,seconds=5)))\n\n diffs = DataFrame(dict(A = df['A']-df['C'],\n B = df['A']-df['B']))\n\n\n # min\n result = diffs.min()\n self.assertEqual(result[0], diffs.ix[0,'A'])\n self.assertEqual(result[1], diffs.ix[0,'B'])\n\n result = diffs.min(axis=1)\n self.assertTrue((result == diffs.ix[0,'B']).all() == True)\n\n # max\n result = diffs.max()\n self.assertEqual(result[0], diffs.ix[2,'A'])\n self.assertEqual(result[1], 
diffs.ix[2,'B'])\n\n result = diffs.max(axis=1)\n self.assertTrue((result == diffs['A']).all() == True)\n\n # abs\n result = diffs.abs()\n result2 = abs(diffs)\n expected = DataFrame(dict(A = df['A']-df['C'],\n B = df['B']-df['A']))\n assert_frame_equal(result,expected)\n assert_frame_equal(result2, expected)\n\n # mixed frame\n mixed = diffs.copy()\n mixed['C'] = 'foo'\n mixed['D'] = 1\n mixed['E'] = 1.\n mixed['F'] = Timestamp('20130101')\n\n # results in an object array\n from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type\n result = mixed.min()\n expected = Series([_coerce_scalar_to_timedelta_type(timedelta(seconds=5*60+5)),\n _coerce_scalar_to_timedelta_type(timedelta(days=-1)),\n 'foo',\n 1,\n 1.0,\n Timestamp('20130101')],\n index=mixed.columns)\n assert_series_equal(result,expected)\n\n # excludes numeric\n result = mixed.min(axis=1)\n expected = Series([1, 1, 1.],index=[0, 1, 2])\n assert_series_equal(result,expected)\n\n # works when only those columns are selected\n result = mixed[['A','B']].min(1)\n expected = Series([ timedelta(days=-1) ] * 3)\n assert_series_equal(result,expected)\n\n result = mixed[['A','B']].min()\n expected = Series([ timedelta(seconds=5*60+5), timedelta(days=-1) ],index=['A','B'])\n assert_series_equal(result,expected)\n\n # GH 3106\n df = DataFrame({'time' : date_range('20130102',periods=5),\n 'time2' : date_range('20130105',periods=5) })\n df['off1'] = df['time2']-df['time']\n self.assertEqual(df['off1'].dtype, 'timedelta64[ns]')\n\n df['off2'] = df['time']-df['time2']\n df._consolidate_inplace()\n self.assertTrue(df['off1'].dtype == 'timedelta64[ns]')\n self.assertTrue(df['off2'].dtype == 'timedelta64[ns]')\n\n def test_datetimelike_setitem_with_inference(self):\n tm._skip_if_not_numpy17_friendly()\n\n # GH 7592\n # assignment of timedeltas with NaT\n\n one_hour = timedelta(hours=1)\n df = DataFrame(index=date_range('20130101',periods=4))\n df['A'] = np.array([1*one_hour]*4, dtype='m8[ns]')\n df.loc[:,'B'] = np.array([2*one_hour]*4, dtype='m8[ns]')\n df.loc[:3,'C'] = np.array([3*one_hour]*3, dtype='m8[ns]')\n df.ix[:,'D'] = np.array([4*one_hour]*4, dtype='m8[ns]')\n df.ix[:3,'E'] = np.array([5*one_hour]*3, dtype='m8[ns]')\n df['F'] = np.timedelta64('NaT')\n df.ix[:-1,'F'] = np.array([6*one_hour]*3, dtype='m8[ns]')\n df.ix[-3:,'G'] = date_range('20130101',periods=3)\n df['H'] = np.datetime64('NaT')\n result = df.dtypes\n expected = Series([np.dtype('timedelta64[ns]')]*6+[np.dtype('datetime64[ns]')]*2,index=list('ABCDEFGH'))\n assert_series_equal(result,expected)\n\n def test_new_empty_index(self):\n df1 = DataFrame(randn(0, 3))\n df2 = DataFrame(randn(0, 3))\n df1.index.name = 'foo'\n self.assertIsNone(df2.index.name)\n\n def test_astype(self):\n casted = self.frame.astype(int)\n expected = DataFrame(self.frame.values.astype(int),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n casted = self.frame.astype(np.int32)\n expected = DataFrame(self.frame.values.astype(np.int32),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n self.frame['foo'] = '5'\n casted = self.frame.astype(int)\n expected = DataFrame(self.frame.values.astype(int),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(casted, expected)\n\n # mixed casting\n def _check_cast(df, v):\n self.assertEqual(list(set([ s.dtype.name for _, s in compat.iteritems(df) ]))[0], v)\n\n mn = self.all_mixed._get_numeric_data().copy()\n mn['little_float'] = 
np.array(12345.,dtype='float16')\n mn['big_float'] = np.array(123456789101112.,dtype='float64')\n\n casted = mn.astype('float64')\n _check_cast(casted, 'float64')\n\n casted = mn.astype('int64')\n _check_cast(casted, 'int64')\n\n casted = self.mixed_float.reindex(columns = ['A','B']).astype('float32')\n _check_cast(casted, 'float32')\n\n casted = mn.reindex(columns = ['little_float']).astype('float16')\n _check_cast(casted, 'float16')\n\n casted = self.mixed_float.reindex(columns = ['A','B']).astype('float16')\n _check_cast(casted, 'float16')\n\n casted = mn.astype('float32')\n _check_cast(casted, 'float32')\n\n casted = mn.astype('int32')\n _check_cast(casted, 'int32')\n\n # to object\n casted = mn.astype('O')\n _check_cast(casted, 'object')\n\n def test_astype_with_exclude_string(self):\n df = self.frame.copy()\n expected = self.frame.astype(int)\n df['string'] = 'foo'\n casted = df.astype(int, raise_on_error = False)\n\n expected['string'] = 'foo'\n assert_frame_equal(casted, expected)\n\n df = self.frame.copy()\n expected = self.frame.astype(np.int32)\n df['string'] = 'foo'\n casted = df.astype(np.int32, raise_on_error = False)\n\n expected['string'] = 'foo'\n assert_frame_equal(casted, expected)\n\n def test_astype_with_view(self):\n\n tf = self.mixed_float.reindex(columns = ['A','B','C'])\n\n casted = tf.astype(np.int64)\n\n casted = tf.astype(np.float32)\n\n # this is the only real reason to do it this way\n tf = np.round(self.frame).astype(np.int32)\n casted = tf.astype(np.float32, copy = False)\n\n tf = self.frame.astype(np.float64)\n casted = tf.astype(np.int64, copy = False)\n\n def test_astype_cast_nan_int(self):\n df = DataFrame(data={\"Values\": [1.0, 2.0, 3.0, np.nan]})\n self.assertRaises(ValueError, df.astype, np.int64)\n\n def test_array_interface(self):\n result = np.sqrt(self.frame)\n tm.assert_isinstance(result, type(self.frame))\n self.assertIs(result.index, self.frame.index)\n self.assertIs(result.columns, self.frame.columns)\n\n assert_frame_equal(result, self.frame.apply(np.sqrt))\n\n def test_pickle(self):\n unpickled = pickle.loads(pickle.dumps(self.mixed_frame))\n assert_frame_equal(self.mixed_frame, unpickled)\n\n # buglet\n self.mixed_frame._data.ndim\n\n # empty\n unpickled = pickle.loads(pickle.dumps(self.empty))\n repr(unpickled)\n\n def test_to_dict(self):\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n recons_data = DataFrame(test_data).to_dict()\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][k2])\n\n recons_data = DataFrame(test_data).to_dict(\"l\")\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][int(k2) - 1])\n\n recons_data = DataFrame(test_data).to_dict(\"s\")\n\n for k, v in compat.iteritems(test_data):\n for k2, v2 in compat.iteritems(v):\n self.assertEqual(v2, recons_data[k][k2])\n\n recons_data = DataFrame(test_data).to_dict(\"r\")\n\n expected_records = [{'A': 1.0, 'B': '1'},\n {'A': 2.0, 'B': '2'},\n {'A': nan, 'B': '3'}]\n\n tm.assert_almost_equal(recons_data, expected_records)\n\n def test_to_records_dt64(self):\n df = DataFrame([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]],\n index=date_range(\"2012-01-01\", \"2012-01-02\"))\n self.assertEqual(df.to_records()['index'][0], df.index[0])\n\n rs = df.to_records(convert_datetime64=False)\n self.assertEqual(rs['index'][0], df.index.values[0])\n\n def test_to_records_with_multindex(self):\n # GH3189\n 
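# Editor's sketch (hypothetical, not in the original pandas suite): with a MultiIndex,\n        # to_records(index=True) emits one record field per index level, so an unnamed\n        # first level shows up as 'level_0' -- the field the test below reads.\n        _mi = MultiIndex.from_tuples([('bar', 'one'), ('bar', 'two')])\n        _rec = DataFrame({'v': [1, 2]}, index=_mi).to_records(index=True)\n        self.assertTrue('level_0' in _rec.dtype.names)\n        # end of editor's sketch\n        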
index = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],\n ['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]\n data = np.zeros((8, 4))\n df = DataFrame(data, index=index)\n r = df.to_records(index=True)['level_0']\n self.assertTrue('bar' in r)\n self.assertTrue('one' not in r)\n\n def test_to_records_with_Mapping_type(self):\n import email\n from email.parser import Parser\n import collections\n\n collections.Mapping.register(email.message.Message)\n\n headers = Parser().parsestr('From: <[email protected]>\\n'\n 'To: <[email protected]>\\n'\n 'Subject: Test message\\n'\n '\\n'\n 'Body would go here\\n')\n\n frame = DataFrame.from_records([headers])\n all( x in frame for x in ['Type','Subject','From'])\n\n def test_from_records_to_records(self):\n # from numpy documentation\n arr = np.zeros((2,), dtype=('i4,f4,a10'))\n arr[:] = [(1, 2., 'Hello'), (2, 3., \"World\")]\n\n frame = DataFrame.from_records(arr)\n\n index = np.arange(len(arr))[::-1]\n indexed_frame = DataFrame.from_records(arr, index=index)\n self.assert_numpy_array_equal(indexed_frame.index, index)\n\n # without names, it should go to last ditch\n arr2 = np.zeros((2,3))\n tm.assert_frame_equal(DataFrame.from_records(arr2), DataFrame(arr2))\n\n # wrong length\n msg = r'Shape of passed values is \\(3,\\), indices imply \\(3, 1\\)'\n with assertRaisesRegexp(ValueError, msg):\n DataFrame.from_records(arr, index=index[:-1])\n\n indexed_frame = DataFrame.from_records(arr, index='f1')\n\n # what to do?\n records = indexed_frame.to_records()\n self.assertEqual(len(records.dtype.names), 3)\n\n records = indexed_frame.to_records(index=False)\n self.assertEqual(len(records.dtype.names), 2)\n self.assertNotIn('index', records.dtype.names)\n\n def test_from_records_nones(self):\n tuples = [(1, 2, None, 3),\n (1, 2, None, 3),\n (None, 2, 5, 3)]\n\n df = DataFrame.from_records(tuples, columns=['a', 'b', 'c', 'd'])\n self.assertTrue(np.isnan(df['c'][0]))\n\n def test_from_records_iterator(self):\n arr = np.array([(1.0, 1.0, 2, 2), (3.0, 3.0, 4, 4), (5., 5., 6, 6), (7., 7., 8, 8)],\n dtype=[('x', np.float64), ('u', np.float32), ('y', np.int64), ('z', np.int32) ])\n df = DataFrame.from_records(iter(arr), nrows=2)\n xp = DataFrame({'x': np.array([1.0, 3.0], dtype=np.float64),\n 'u': np.array([1.0, 3.0], dtype=np.float32),\n 'y': np.array([2, 4], dtype=np.int64),\n 'z': np.array([2, 4], dtype=np.int32)})\n assert_frame_equal(df.reindex_like(xp), xp)\n\n # no dtypes specified here, so just compare with the default\n arr = [(1.0, 2), (3.0, 4), (5., 6), (7., 8)]\n df = DataFrame.from_records(iter(arr), columns=['x', 'y'],\n nrows=2)\n assert_frame_equal(df, xp.reindex(columns=['x','y']), check_dtype=False)\n\n def test_from_records_tuples_generator(self):\n def tuple_generator(length):\n for i in range(length):\n letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n yield (i, letters[i % len(letters)], i/length)\n\n columns_names = ['Integer', 'String', 'Float']\n columns = [[i[j] for i in tuple_generator(10)] for j in range(len(columns_names))]\n data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = tuple_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n assert_frame_equal(result, expected)\n\n def test_from_records_lists_generator(self):\n def list_generator(length):\n for i in range(length):\n letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n yield [i, letters[i % len(letters)], i/length]\n\n columns_names = ['Integer', 'String', 
'Float']\n columns = [[i[j] for i in list_generator(10)] for j in range(len(columns_names))]\n data = {'Integer': columns[0], 'String': columns[1], 'Float': columns[2]}\n expected = DataFrame(data, columns=columns_names)\n\n generator = list_generator(10)\n result = DataFrame.from_records(generator, columns=columns_names)\n assert_frame_equal(result, expected)\n\n def test_from_records_columns_not_modified(self):\n tuples = [(1, 2, 3),\n (1, 2, 3),\n (2, 5, 3)]\n\n columns = ['a', 'b', 'c']\n original_columns = list(columns)\n df = DataFrame.from_records(tuples, columns=columns, index='a')\n self.assertEqual(columns, original_columns)\n\n def test_from_records_decimal(self):\n from decimal import Decimal\n\n tuples = [(Decimal('1.5'),), (Decimal('2.5'),), (None,)]\n\n df = DataFrame.from_records(tuples, columns=['a'])\n self.assertEqual(df['a'].dtype, object)\n\n df = DataFrame.from_records(tuples, columns=['a'], coerce_float=True)\n self.assertEqual(df['a'].dtype, np.float64)\n self.assertTrue(np.isnan(df['a'].values[-1]))\n\n def test_from_records_duplicates(self):\n result = DataFrame.from_records([(1, 2, 3), (4, 5, 6)],\n columns=['a', 'b', 'a'])\n\n expected = DataFrame([(1, 2, 3), (4, 5, 6)],\n columns=['a', 'b', 'a'])\n\n assert_frame_equal(result, expected)\n\n def test_from_records_set_index_name(self):\n def create_dict(order_id):\n return {'order_id': order_id, 'quantity': np.random.randint(1, 10),\n 'price': np.random.randint(1, 10)}\n documents = [create_dict(i) for i in range(10)]\n # demo missing data\n documents.append({'order_id': 10, 'quantity': 5})\n\n result = DataFrame.from_records(documents, index='order_id')\n self.assertEqual(result.index.name, 'order_id')\n\n # MultiIndex\n result = DataFrame.from_records(documents,\n index=['order_id', 'quantity'])\n self.assertEqual(result.index.names, ('order_id', 'quantity'))\n\n def test_from_records_misc_brokenness(self):\n # #2179\n\n data = {1: ['foo'], 2: ['bar']}\n\n result = DataFrame.from_records(data, columns=['a', 'b'])\n exp = DataFrame(data, columns=['a', 'b'])\n assert_frame_equal(result, exp)\n\n # overlap in index/index_names\n\n data = {'a': [1, 2, 3], 'b': [4, 5, 6]}\n\n result = DataFrame.from_records(data, index=['a', 'b', 'c'])\n exp = DataFrame(data, index=['a', 'b', 'c'])\n assert_frame_equal(result, exp)\n\n\n # GH 2623\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), 'hi']) # test col upconverts to obj\n df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])\n results = df2_obj.get_dtype_counts()\n expected = Series({ 'datetime64[ns]' : 1, 'object' : 1 })\n\n rows = []\n rows.append([datetime(2010, 1, 1), 1])\n rows.append([datetime(2010, 1, 2), 1])\n df2_obj = DataFrame.from_records(rows, columns=['date', 'test'])\n results = df2_obj.get_dtype_counts()\n expected = Series({ 'datetime64[ns]' : 1, 'int64' : 1 })\n\n def test_from_records_empty(self):\n # 3562\n result = DataFrame.from_records([], columns=['a','b','c'])\n expected = DataFrame(columns=['a','b','c'])\n assert_frame_equal(result, expected)\n\n result = DataFrame.from_records([], columns=['a','b','b'])\n expected = DataFrame(columns=['a','b','b'])\n assert_frame_equal(result, expected)\n\n def test_from_records_empty_with_nonempty_fields_gh3682(self):\n a = np.array([(1, 2)], dtype=[('id', np.int64), ('value', np.int64)])\n df = DataFrame.from_records(a, index='id')\n assert_array_equal(df.index, Index([1], name='id'))\n self.assertEqual(df.index.name, 'id')\n assert_array_equal(df.columns, 
Index(['value']))\n\n b = np.array([], dtype=[('id', np.int64), ('value', np.int64)])\n df = DataFrame.from_records(b, index='id')\n assert_array_equal(df.index, Index([], name='id'))\n self.assertEqual(df.index.name, 'id')\n\n def test_from_records_with_datetimes(self):\n if sys.version < LooseVersion('2.7'):\n raise nose.SkipTest('rec arrays dont work properly with py2.6')\n\n # this may fail on certain platforms because of a numpy issue\n # related GH6140\n if not is_little_endian():\n raise nose.SkipTest(\"known failure of test on non-little endian\")\n\n # construction with a null in a recarray\n # GH 6140\n expected = DataFrame({ 'EXPIRY' : [datetime(2005, 3, 1, 0, 0), None ]})\n\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [('EXPIRY', '<M8[ns]')]\n\n try:\n recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)\n except (ValueError):\n raise nose.SkipTest(\"known failure of numpy rec array creation\")\n\n result = DataFrame.from_records(recarray)\n assert_frame_equal(result,expected)\n\n # coercion should work too\n arrdata = [np.array([datetime(2005, 3, 1, 0, 0), None])]\n dtypes = [('EXPIRY', '<M8[m]')]\n recarray = np.core.records.fromarrays(arrdata, dtype=dtypes)\n result = DataFrame.from_records(recarray)\n assert_frame_equal(result,expected)\n\n def test_to_records_floats(self):\n df = DataFrame(np.random.rand(10, 10))\n df.to_records()\n\n def test_to_recods_index_name(self):\n df = DataFrame(np.random.randn(3, 3))\n df.index.name = 'X'\n rs = df.to_records()\n self.assertIn('X', rs.dtype.fields)\n\n df = DataFrame(np.random.randn(3, 3))\n rs = df.to_records()\n self.assertIn('index', rs.dtype.fields)\n\n df.index = MultiIndex.from_tuples([('a', 'x'), ('a', 'y'), ('b', 'z')])\n df.index.names = ['A', None]\n rs = df.to_records()\n self.assertIn('level_0', rs.dtype.fields)\n\n def test_join_str_datetime(self):\n str_dates = ['20120209', '20120222']\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n A = DataFrame(str_dates, index=lrange(2), columns=['aa'])\n C = DataFrame([[1, 2], [3, 4]], index=str_dates, columns=dt_dates)\n\n tst = A.join(C, on='aa')\n\n self.assertEqual(len(tst.columns), 3)\n\n def test_from_records_sequencelike(self):\n df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),\n 'A1': np.array(np.random.randn(6), dtype = np.float64),\n 'B' : np.array(np.arange(6), dtype = np.int64),\n 'C' : ['foo'] * 6,\n 'D' : np.array([True, False] * 3, dtype=bool),\n 'E' : np.array(np.random.randn(6), dtype = np.float32),\n 'E1': np.array(np.random.randn(6), dtype = np.float32),\n 'F' : np.array(np.arange(6), dtype = np.int32) })\n\n # this is actually tricky to create the recordlike arrays and have the dtypes be intact\n blocks = df.blocks\n tuples = []\n columns = []\n dtypes = []\n for dtype, b in compat.iteritems(blocks):\n columns.extend(b.columns)\n dtypes.extend([ (c,np.dtype(dtype).descr[0][1]) for c in b.columns ])\n for i in range(len(df.index)):\n tup = []\n for _, b in compat.iteritems(blocks):\n tup.extend(b.irow(i).values)\n tuples.append(tuple(tup))\n\n recarray = np.array(tuples, dtype=dtypes).view(np.recarray)\n recarray2 = df.to_records()\n lists = [list(x) for x in tuples]\n\n # tuples (lose the dtype info)\n result = DataFrame.from_records(tuples, columns=columns).reindex(columns=df.columns)\n\n # created recarray and with to_records recarray (have dtype info)\n result2 = DataFrame.from_records(recarray, columns=columns).reindex(columns=df.columns)\n result3 = DataFrame.from_records(recarray2, 
columns=columns).reindex(columns=df.columns)\n\n # list of tupels (no dtype info)\n result4 = DataFrame.from_records(lists, columns=columns).reindex(columns=df.columns)\n\n assert_frame_equal(result, df, check_dtype=False)\n assert_frame_equal(result2, df)\n assert_frame_equal(result3, df)\n assert_frame_equal(result4, df, check_dtype=False)\n\n # tuples is in the order of the columns\n result = DataFrame.from_records(tuples)\n self.assert_numpy_array_equal(result.columns, lrange(8))\n\n # test exclude parameter & we are casting the results here (as we don't have dtype info to recover)\n columns_to_test = [ columns.index('C'), columns.index('E1') ]\n\n exclude = list(set(range(8))-set(columns_to_test))\n result = DataFrame.from_records(tuples, exclude=exclude)\n result.columns = [ columns[i] for i in sorted(columns_to_test) ]\n assert_series_equal(result['C'], df['C'])\n assert_series_equal(result['E1'], df['E1'].astype('float64'))\n\n # empty case\n result = DataFrame.from_records([], columns=['foo', 'bar', 'baz'])\n self.assertEqual(len(result), 0)\n self.assert_numpy_array_equal(result.columns, ['foo', 'bar', 'baz'])\n\n result = DataFrame.from_records([])\n self.assertEqual(len(result), 0)\n self.assertEqual(len(result.columns), 0)\n\n def test_from_records_dictlike(self):\n\n # test the dict methods\n df = DataFrame({'A' : np.array(np.random.randn(6), dtype = np.float64),\n 'A1': np.array(np.random.randn(6), dtype = np.float64),\n 'B' : np.array(np.arange(6), dtype = np.int64),\n 'C' : ['foo'] * 6,\n 'D' : np.array([True, False] * 3, dtype=bool),\n 'E' : np.array(np.random.randn(6), dtype = np.float32),\n 'E1': np.array(np.random.randn(6), dtype = np.float32),\n 'F' : np.array(np.arange(6), dtype = np.int32) })\n\n # columns is in a different order here than the actual items iterated from the dict\n columns = []\n for dtype, b in compat.iteritems(df.blocks):\n columns.extend(b.columns)\n\n asdict = dict((x, y) for x, y in compat.iteritems(df))\n asdict2 = dict((x, y.values) for x, y in compat.iteritems(df))\n\n # dict of series & dict of ndarrays (have dtype info)\n results = []\n results.append(DataFrame.from_records(asdict).reindex(columns=df.columns))\n results.append(DataFrame.from_records(asdict, columns=columns).reindex(columns=df.columns))\n results.append(DataFrame.from_records(asdict2, columns=columns).reindex(columns=df.columns))\n\n for r in results:\n assert_frame_equal(r, df)\n\n def test_from_records_with_index_data(self):\n df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n\n data = np.random.randn(10)\n df1 = DataFrame.from_records(df, index=data)\n assert(df1.index.equals(Index(data)))\n\n def test_from_records_bad_index_column(self):\n df = DataFrame(np.random.randn(10, 3), columns=['A', 'B', 'C'])\n\n # should pass\n df1 = DataFrame.from_records(df, index=['C'])\n assert(df1.index.equals(Index(df.C)))\n\n df1 = DataFrame.from_records(df, index='C')\n assert(df1.index.equals(Index(df.C)))\n\n # should fail\n self.assertRaises(ValueError, DataFrame.from_records, df, index=[2])\n self.assertRaises(KeyError, DataFrame.from_records, df, index=2)\n\n def test_from_records_non_tuple(self):\n class Record(object):\n\n def __init__(self, *args):\n self.args = args\n\n def __getitem__(self, i):\n return self.args[i]\n\n def __iter__(self):\n return iter(self.args)\n\n recs = [Record(1, 2, 3), Record(4, 5, 6), Record(7, 8, 9)]\n tups = lmap(tuple, recs)\n\n result = DataFrame.from_records(recs)\n expected = DataFrame.from_records(tups)\n 
assert_frame_equal(result, expected)\n\n def test_from_records_len0_with_columns(self):\n # #2633\n result = DataFrame.from_records([], index='foo',\n columns=['foo', 'bar'])\n\n self.assertTrue(np.array_equal(result.columns, ['bar']))\n self.assertEqual(len(result), 0)\n self.assertEqual(result.index.name, 'foo')\n\n def test_get_agg_axis(self):\n cols = self.frame._get_agg_axis(0)\n self.assertIs(cols, self.frame.columns)\n\n idx = self.frame._get_agg_axis(1)\n self.assertIs(idx, self.frame.index)\n\n self.assertRaises(ValueError, self.frame._get_agg_axis, 2)\n\n def test_nonzero(self):\n self.assertTrue(self.empty.empty)\n\n self.assertFalse(self.frame.empty)\n self.assertFalse(self.mixed_frame.empty)\n\n # corner case\n df = DataFrame({'A': [1., 2., 3.],\n 'B': ['a', 'b', 'c']},\n index=np.arange(3))\n del df['A']\n self.assertFalse(df.empty)\n\n def test_repr_empty(self):\n buf = StringIO()\n\n # empty\n foo = repr(self.empty)\n\n # empty with index\n frame = DataFrame(index=np.arange(1000))\n foo = repr(frame)\n\n def test_repr_mixed(self):\n buf = StringIO()\n\n # mixed\n foo = repr(self.mixed_frame)\n self.mixed_frame.info(verbose=False, buf=buf)\n\n @slow\n def test_repr_mixed_big(self):\n # big mixed\n biggie = DataFrame({'A': randn(200),\n 'B': tm.makeStringIndex(200)},\n index=lrange(200))\n biggie['A'][:20] = nan\n biggie['B'][:20] = nan\n\n foo = repr(biggie)\n\n def test_repr(self):\n buf = StringIO()\n\n # small one\n foo = repr(self.frame)\n self.frame.info(verbose=False, buf=buf)\n\n # even smaller\n self.frame.reindex(columns=['A']).info(verbose=False, buf=buf)\n self.frame.reindex(columns=['A', 'B']).info(verbose=False, buf=buf)\n\n # exhausting cases in DataFrame.info\n\n # columns but no index\n no_index = DataFrame(columns=[0, 1, 3])\n foo = repr(no_index)\n\n # no columns or index\n self.empty.info(buf=buf)\n\n df = DataFrame([\"a\\n\\r\\tb\"], columns=[\"a\\n\\r\\td\"], index=[\"a\\n\\r\\tf\"])\n self.assertFalse(\"\\t\" in repr(df))\n self.assertFalse(\"\\r\" in repr(df))\n self.assertFalse(\"a\\n\" in repr(df))\n\n def test_repr_dimensions(self):\n df = DataFrame([[1, 2,], [3, 4]])\n with pd.option_context('display.show_dimensions', True):\n self.assertTrue(\"2 rows x 2 columns\" in repr(df))\n\n with pd.option_context('display.show_dimensions', False):\n self.assertFalse(\"2 rows x 2 columns\" in repr(df))\n\n with pd.option_context('display.show_dimensions', 'truncate'):\n self.assertFalse(\"2 rows x 2 columns\" in repr(df))\n\n @slow\n def test_repr_big(self):\n buf = StringIO()\n\n # big one\n biggie = DataFrame(np.zeros((200, 4)), columns=lrange(4),\n index=lrange(200))\n foo = repr(biggie)\n\n def test_repr_unsortable(self):\n # columns are not sortable\n import warnings\n warn_filters = warnings.filters\n warnings.filterwarnings('ignore',\n category=FutureWarning,\n module=\".*format\")\n\n unsortable = DataFrame({'foo': [1] * 50,\n datetime.today(): [1] * 50,\n 'bar': ['bar'] * 50,\n datetime.today(\n ) + timedelta(1): ['bar'] * 50},\n index=np.arange(50))\n foo = repr(unsortable)\n\n fmt.set_option('display.precision', 3, 'display.column_space', 10)\n repr(self.frame)\n\n fmt.set_option('display.max_rows', 10, 'display.max_columns', 2)\n repr(self.frame)\n\n fmt.set_option('display.max_rows', 1000, 'display.max_columns', 1000)\n repr(self.frame)\n\n self.reset_display_options()\n\n warnings.filters = warn_filters\n\n def test_repr_unicode(self):\n uval = u('\\u03c3\\u03c3\\u03c3\\u03c3')\n bval = uval.encode('utf-8')\n df = DataFrame({'A': [uval, 
uval]})\n\n result = repr(df)\n ex_top = ' A'\n self.assertEqual(result.split('\\n')[0].rstrip(), ex_top)\n\n df = DataFrame({'A': [uval, uval]})\n result = repr(df)\n self.assertEqual(result.split('\\n')[0].rstrip(), ex_top)\n\n def test_unicode_string_with_unicode(self):\n df = DataFrame({'A': [u(\"\\u05d0\")]})\n\n if compat.PY3:\n str(df)\n else:\n compat.text_type(df)\n\n def test_bytestring_with_unicode(self):\n df = DataFrame({'A': [u(\"\\u05d0\")]})\n if compat.PY3:\n bytes(df)\n else:\n str(df)\n\n def test_very_wide_info_repr(self):\n df = DataFrame(np.random.randn(10, 20),\n columns=[tm.rands(10) for _ in range(20)])\n repr(df)\n\n def test_repr_column_name_unicode_truncation_bug(self):\n # #1906\n df = DataFrame({'Id': [7117434],\n 'StringCol': ('Is it possible to modify drop plot code'\n ' so that the output graph is displayed '\n 'in iphone simulator, Is it possible to '\n 'modify drop plot code so that the '\n 'output graph is \\xe2\\x80\\xa8displayed '\n 'in iphone simulator.Now we are adding '\n 'the CSV file externally. I want to Call'\n ' the File through the code..')})\n\n result = repr(df)\n self.assertIn('StringCol', result)\n\n def test_head_tail(self):\n assert_frame_equal(self.frame.head(), self.frame[:5])\n assert_frame_equal(self.frame.tail(), self.frame[-5:])\n assert_frame_equal(self.frame.head(0), self.frame)\n assert_frame_equal(self.frame.tail(0), self.frame)\n assert_frame_equal(self.frame.head(-1), self.frame[:-1])\n assert_frame_equal(self.frame.tail(-1), self.frame[1:])\n assert_frame_equal(self.frame.head(1), self.frame[:1])\n assert_frame_equal(self.frame.tail(1), self.frame[-1:])\n # with a float index\n df = self.frame.copy()\n df.index = np.arange(len(self.frame)) + 0.1\n assert_frame_equal(df.head(), df.iloc[:5])\n assert_frame_equal(df.tail(), df.iloc[-5:])\n assert_frame_equal(df.head(0), df)\n assert_frame_equal(df.tail(0), df)\n assert_frame_equal(df.head(-1), df.iloc[:-1])\n assert_frame_equal(df.tail(-1), df.iloc[1:])\n #test empty dataframe\n empty_df = DataFrame()\n assert_frame_equal(empty_df.tail(), empty_df)\n assert_frame_equal(empty_df.head(), empty_df)\n\n def test_insert(self):\n df = DataFrame(np.random.randn(5, 3), index=np.arange(5),\n columns=['c', 'b', 'a'])\n\n df.insert(0, 'foo', df['a'])\n self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'b', 'a'])\n assert_almost_equal(df['a'], df['foo'])\n\n df.insert(2, 'bar', df['c'])\n self.assert_numpy_array_equal(df.columns, ['foo', 'c', 'bar', 'b', 'a'])\n assert_almost_equal(df['c'], df['bar'])\n\n # diff dtype\n\n # new item\n df['x'] = df['a'].astype('float32')\n result = Series(dict(float64 = 5, float32 = 1))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n # replacing current (in different block)\n df['a'] = df['a'].astype('float32')\n result = Series(dict(float64 = 4, float32 = 2))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n df['y'] = df['a'].astype('int32')\n result = Series(dict(float64 = 4, float32 = 2, int32 = 1))\n self.assertTrue((df.get_dtype_counts() == result).all())\n\n with assertRaisesRegexp(ValueError, 'already exists'):\n df.insert(1, 'a', df['b'])\n self.assertRaises(ValueError, df.insert, 1, 'c', df['b'])\n\n df.columns.name = 'some_name'\n # preserve columns name field\n df.insert(0, 'baz', df['c'])\n self.assertEqual(df.columns.name, 'some_name')\n\n def test_delitem(self):\n del self.frame['A']\n self.assertNotIn('A', self.frame)\n\n def test_pop(self):\n self.frame.columns.name = 'baz'\n\n A = self.frame.pop('A')\n 
self.assertNotIn('A', self.frame)\n\n self.frame['foo'] = 'bar'\n foo = self.frame.pop('foo')\n self.assertNotIn('foo', self.frame)\n # TODO self.assertEqual(self.frame.columns.name, 'baz')\n\n def test_pop_non_unique_cols(self):\n df = DataFrame({0: [0, 1], 1: [0, 1], 2: [4, 5]})\n df.columns = [\"a\", \"b\", \"a\"]\n\n res = df.pop(\"a\")\n self.assertEqual(type(res), DataFrame)\n self.assertEqual(len(res), 2)\n self.assertEqual(len(df.columns), 1)\n self.assertTrue(\"b\" in df.columns)\n self.assertFalse(\"a\" in df.columns)\n self.assertEqual(len(df.index), 2)\n\n def test_iter(self):\n self.assertTrue(tm.equalContents(list(self.frame), self.frame.columns))\n\n def test_iterrows(self):\n for i, (k, v) in enumerate(self.frame.iterrows()):\n exp = self.frame.xs(self.frame.index[i])\n assert_series_equal(v, exp)\n\n for i, (k, v) in enumerate(self.mixed_frame.iterrows()):\n exp = self.mixed_frame.xs(self.mixed_frame.index[i])\n assert_series_equal(v, exp)\n\n def test_itertuples(self):\n for i, tup in enumerate(self.frame.itertuples()):\n s = Series(tup[1:])\n s.name = tup[0]\n expected = self.frame.ix[i, :].reset_index(drop=True)\n assert_series_equal(s, expected)\n\n df = DataFrame({'floats': np.random.randn(5),\n 'ints': lrange(5)}, columns=['floats', 'ints'])\n\n for tup in df.itertuples(index=False):\n tm.assert_isinstance(tup[1], np.integer)\n\n df = DataFrame(data={\"a\": [1, 2, 3], \"b\": [4, 5, 6]})\n dfaa = df[['a', 'a']]\n self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])\n\n def test_len(self):\n self.assertEqual(len(self.frame), len(self.frame.index))\n\n def test_operators(self):\n garbage = random.random(4)\n colSeries = Series(garbage, index=np.array(self.frame.columns))\n\n idSum = self.frame + self.frame\n seriesSum = self.frame + colSeries\n\n for col, series in compat.iteritems(idSum):\n for idx, val in compat.iteritems(series):\n origVal = self.frame[col][idx] * 2\n if not np.isnan(val):\n self.assertEqual(val, origVal)\n else:\n self.assertTrue(np.isnan(origVal))\n\n for col, series in compat.iteritems(seriesSum):\n for idx, val in compat.iteritems(series):\n origVal = self.frame[col][idx] + colSeries[col]\n if not np.isnan(val):\n self.assertEqual(val, origVal)\n else:\n self.assertTrue(np.isnan(origVal))\n\n added = self.frame2 + self.frame2\n expected = self.frame2 * 2\n assert_frame_equal(added, expected)\n\n df = DataFrame({'a': ['a', None, 'b']})\n assert_frame_equal(df + df, DataFrame({'a': ['aa', np.nan, 'bb']}))\n\n def test_operators_boolean(self):\n\n # GH 5808\n # empty frames, non-mixed dtype\n\n result = DataFrame(index=[1]) & DataFrame(index=[1])\n assert_frame_equal(result,DataFrame(index=[1]))\n\n result = DataFrame(index=[1]) | DataFrame(index=[1])\n assert_frame_equal(result,DataFrame(index=[1]))\n\n result = DataFrame(index=[1]) & DataFrame(index=[1,2])\n assert_frame_equal(result,DataFrame(index=[1,2]))\n\n result = DataFrame(index=[1],columns=['A']) & DataFrame(index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(index=[1],columns=['A']))\n\n result = DataFrame(True,index=[1],columns=['A']) & DataFrame(True,index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))\n\n result = DataFrame(True,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n assert_frame_equal(result,DataFrame(True,index=[1],columns=['A']))\n\n # boolean ops\n result = DataFrame(1,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n 
assert_frame_equal(result,DataFrame(1,index=[1],columns=['A']))\n\n def f():\n DataFrame(1.0,index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n self.assertRaises(TypeError, f)\n\n def f():\n DataFrame('foo',index=[1],columns=['A']) | DataFrame(True,index=[1],columns=['A'])\n self.assertRaises(TypeError, f)\n\n def test_operators_none_as_na(self):\n df = DataFrame({\"col1\": [2, 5.0, 123, None],\n \"col2\": [1, 2, 3, 4]}, dtype=object)\n\n ops = [operator.add, operator.sub, operator.mul, operator.truediv]\n\n # since filling converts dtypes from object, changed expected to be object\n for op in ops:\n filled = df.fillna(np.nan)\n result = op(df, 3)\n expected = op(filled, 3).astype(object)\n expected[com.isnull(expected)] = None\n assert_frame_equal(result, expected)\n\n result = op(df, df)\n expected = op(filled, filled).astype(object)\n expected[com.isnull(expected)] = None\n assert_frame_equal(result, expected)\n\n result = op(df, df.fillna(7))\n assert_frame_equal(result, expected)\n\n result = op(df.fillna(7), df)\n assert_frame_equal(result, expected, check_dtype=False)\n\n def test_comparison_invalid(self):\n\n def check(df,df2):\n\n for (x, y) in [(df,df2),(df2,df)]:\n self.assertRaises(TypeError, lambda : x == y)\n self.assertRaises(TypeError, lambda : x != y)\n self.assertRaises(TypeError, lambda : x >= y)\n self.assertRaises(TypeError, lambda : x > y)\n self.assertRaises(TypeError, lambda : x < y)\n self.assertRaises(TypeError, lambda : x <= y)\n\n # GH4968\n # invalid date/int comparisons\n df = DataFrame(np.random.randint(10, size=(10, 1)), columns=['a'])\n df['dates'] = date_range('20010101', periods=len(df))\n\n df2 = df.copy()\n df2['dates'] = df['a']\n check(df,df2)\n\n df = DataFrame(np.random.randint(10, size=(10, 2)), columns=['a', 'b'])\n df2 = DataFrame({'a': date_range('20010101', periods=len(df)), 'b': date_range('20100101', periods=len(df))})\n check(df,df2)\n\n def test_timestamp_compare(self):\n # make sure we can compare Timestamps on the right AND left hand side\n # GH4982\n df = DataFrame({'dates1': date_range('20010101', periods=10),\n 'dates2': date_range('20010102', periods=10),\n 'intcol': np.random.randint(1000000000, size=10),\n 'floatcol': np.random.randn(10),\n 'stringcol': list(tm.rands(10))})\n df.loc[np.random.rand(len(df)) > 0.5, 'dates2'] = pd.NaT\n ops = {'gt': 'lt', 'lt': 'gt', 'ge': 'le', 'le': 'ge', 'eq': 'eq',\n 'ne': 'ne'}\n for left, right in ops.items():\n left_f = getattr(operator, left)\n right_f = getattr(operator, right)\n\n # no nats\n expected = left_f(df, Timestamp('20010109'))\n result = right_f(Timestamp('20010109'), df)\n tm.assert_frame_equal(result, expected)\n\n # nats\n expected = left_f(df, Timestamp('nat'))\n result = right_f(Timestamp('nat'), df)\n tm.assert_frame_equal(result, expected)\n\n def test_modulo(self):\n\n # GH3590, modulo as ints\n p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })\n\n ### this is technically wrong as the integer portion is coerced to float ###\n expected = DataFrame({ 'first' : Series([0,0,0,0],dtype='float64'), 'second' : Series([np.nan,np.nan,np.nan,0]) })\n result = p % p\n assert_frame_equal(result,expected)\n\n # numpy has a slightly different (wrong) treatement\n result2 = DataFrame(p.values % p.values,index=p.index,columns=p.columns,dtype='float64')\n result2.iloc[0:3,1] = np.nan\n assert_frame_equal(result2,expected)\n\n result = p % 0\n expected = DataFrame(np.nan,index=p.index,columns=p.columns)\n assert_frame_equal(result,expected)\n\n # numpy has a 
slightly different (wrong) treatement\n result2 = DataFrame(p.values.astype('float64') % 0,index=p.index,columns=p.columns)\n assert_frame_equal(result2,expected)\n\n # not commutative with series\n p = DataFrame(np.random.randn(10, 5))\n s = p[0]\n res = s % p\n res2 = p % s\n self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))\n\n def test_div(self):\n\n # integer div, but deal with the 0's\n p = DataFrame({ 'first' : [3,4,5,8], 'second' : [0,0,0,3] })\n result = p / p\n\n ### this is technically wrong as the integer portion is coerced to float ###\n expected = DataFrame({ 'first' : Series([1,1,1,1],dtype='float64'), 'second' : Series([np.inf,np.inf,np.inf,1]) })\n assert_frame_equal(result,expected)\n\n result2 = DataFrame(p.values.astype('float64')/p.values,index=p.index,columns=p.columns).fillna(np.inf)\n assert_frame_equal(result2,expected)\n\n result = p / 0\n expected = DataFrame(np.inf,index=p.index,columns=p.columns)\n assert_frame_equal(result,expected)\n\n # numpy has a slightly different (wrong) treatement\n result2 = DataFrame(p.values.astype('float64')/0,index=p.index,columns=p.columns).fillna(np.inf)\n assert_frame_equal(result2,expected)\n\n p = DataFrame(np.random.randn(10, 5))\n s = p[0]\n res = s / p\n res2 = p / s\n self.assertFalse(np.array_equal(res.fillna(0), res2.fillna(0)))\n\n def test_logical_operators(self):\n import operator\n\n def _check_bin_op(op):\n result = op(df1, df2)\n expected = DataFrame(op(df1.values, df2.values), index=df1.index,\n columns=df1.columns)\n self.assertEqual(result.values.dtype, np.bool_)\n assert_frame_equal(result, expected)\n\n def _check_unary_op(op):\n result = op(df1)\n expected = DataFrame(op(df1.values), index=df1.index,\n columns=df1.columns)\n self.assertEqual(result.values.dtype, np.bool_)\n assert_frame_equal(result, expected)\n\n df1 = {'a': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},\n 'b': {'a': False, 'b': True, 'c': False,\n 'd': False, 'e': False},\n 'c': {'a': False, 'b': False, 'c': True,\n 'd': False, 'e': False},\n 'd': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True},\n 'e': {'a': True, 'b': False, 'c': False, 'd': True, 'e': True}}\n\n df2 = {'a': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},\n 'b': {'a': False, 'b': True, 'c': False,\n 'd': False, 'e': False},\n 'c': {'a': True, 'b': False, 'c': True, 'd': False, 'e': False},\n 'd': {'a': False, 'b': False, 'c': False,\n 'd': True, 'e': False},\n 'e': {'a': False, 'b': False, 'c': False,\n 'd': False, 'e': True}}\n\n df1 = DataFrame(df1)\n df2 = DataFrame(df2)\n\n _check_bin_op(operator.and_)\n _check_bin_op(operator.or_)\n _check_bin_op(operator.xor)\n\n # operator.neg is deprecated in numpy >= 1.9\n _check_unary_op(operator.inv)\n\n def test_logical_typeerror(self):\n if not compat.PY3:\n self.assertRaises(TypeError, self.frame.__eq__, 'foo')\n self.assertRaises(TypeError, self.frame.__lt__, 'foo')\n self.assertRaises(TypeError, self.frame.__gt__, 'foo')\n self.assertRaises(TypeError, self.frame.__ne__, 'foo')\n else:\n raise nose.SkipTest('test_logical_typeerror not tested on PY3')\n\n def test_constructor_lists_to_object_dtype(self):\n # from #1074\n d = DataFrame({'a': [np.nan, False]})\n self.assertEqual(d['a'].dtype, np.object_)\n self.assertFalse(d['a'][1])\n\n def test_constructor_with_nas(self):\n # GH 5016\n # na's in indicies\n\n def check(df):\n for i in range(len(df.columns)):\n df.iloc[:,i]\n\n # allow single nans to succeed\n indexer = np.arange(len(df.columns))[isnull(df.columns)]\n\n if 
len(indexer) == 1:\n assert_series_equal(df.iloc[:,indexer[0]],df.loc[:,np.nan])\n\n\n # multiple nans should fail\n else:\n\n def f():\n df.loc[:,np.nan]\n self.assertRaises(ValueError, f)\n\n\n df = DataFrame([[1,2,3],[4,5,6]], index=[1,np.nan])\n check(df)\n\n df = DataFrame([[1,2,3],[4,5,6]], columns=[1.1,2.2,np.nan])\n check(df)\n\n df = DataFrame([[0,1,2,3],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])\n check(df)\n\n df = DataFrame([[0.0,1,2,3.0],[4,5,6,7]], columns=[np.nan,1.1,2.2,np.nan])\n check(df)\n\n def test_logical_with_nas(self):\n d = DataFrame({'a': [np.nan, False], 'b': [True, True]})\n\n # GH4947\n # bool comparisons should return bool\n result = d['a'] | d['b']\n expected = Series([False, True])\n assert_series_equal(result, expected)\n\n # GH4604, automatic casting here\n result = d['a'].fillna(False) | d['b']\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n result = d['a'].fillna(False,downcast=False) | d['b']\n expected = Series([True, True])\n assert_series_equal(result, expected)\n\n def test_neg(self):\n # what to do?\n assert_frame_equal(-self.frame, -1 * self.frame)\n\n def test_invert(self):\n assert_frame_equal(-(self.frame < 0), ~(self.frame < 0))\n\n def test_first_last_valid(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n mat[-5:] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n index = frame.first_valid_index()\n\n self.assertEqual(index, frame.index[5])\n\n index = frame.last_valid_index()\n self.assertEqual(index, frame.index[-6])\n\n def test_arith_flex_frame(self):\n ops = ['add', 'sub', 'mul', 'div', 'truediv', 'pow', 'floordiv', 'mod']\n if not compat.PY3:\n aliases = {}\n else:\n aliases = {'div': 'truediv'}\n\n for op in ops:\n try:\n alias = aliases.get(op, op)\n f = getattr(operator, alias)\n result = getattr(self.frame, op)(2 * self.frame)\n exp = f(self.frame, 2 * self.frame)\n assert_frame_equal(result, exp)\n\n # vs mix float\n result = getattr(self.mixed_float, op)(2 * self.mixed_float)\n exp = f(self.mixed_float, 2 * self.mixed_float)\n assert_frame_equal(result, exp)\n _check_mixed_float(result, dtype = dict(C = None))\n\n # vs mix int\n if op in ['add','sub','mul']:\n result = getattr(self.mixed_int, op)(2 + self.mixed_int)\n exp = f(self.mixed_int, 2 + self.mixed_int)\n\n # overflow in the uint\n dtype = None\n if op in ['sub']:\n dtype = dict(B = 'object', C = None)\n elif op in ['add','mul']:\n dtype = dict(C = None)\n assert_frame_equal(result, exp)\n _check_mixed_int(result, dtype = dtype)\n\n # rops\n r_f = lambda x, y: f(y, x)\n result = getattr(self.frame, 'r' + op)(2 * self.frame)\n exp = r_f(self.frame, 2 * self.frame)\n assert_frame_equal(result, exp)\n\n # vs mix float\n result = getattr(self.mixed_float, op)(2 * self.mixed_float)\n exp = f(self.mixed_float, 2 * self.mixed_float)\n assert_frame_equal(result, exp)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = getattr(self.intframe, op)(2 * self.intframe)\n exp = f(self.intframe, 2 * self.intframe)\n assert_frame_equal(result, exp)\n\n # vs mix int\n if op in ['add','sub','mul']:\n result = getattr(self.mixed_int, op)(2 + self.mixed_int)\n exp = f(self.mixed_int, 2 + self.mixed_int)\n\n # overflow in the uint\n dtype = None\n if op in ['sub']:\n dtype = dict(B = 'object', C = None)\n elif op in ['add','mul']:\n dtype = dict(C = None)\n assert_frame_equal(result, exp)\n _check_mixed_int(result, dtype = dtype)\n except:\n com.pprint_thing(\"Failing operation %r\" % op)\n raise\n\n # ndim >= 
3\n ndim_5 = np.ones(self.frame.shape + (3, 4, 5))\n with assertRaisesRegexp(ValueError, 'shape'):\n f(self.frame, ndim_5)\n\n with assertRaisesRegexp(ValueError, 'shape'):\n getattr(self.frame, op)(ndim_5)\n\n\n # res_add = self.frame.add(self.frame)\n # res_sub = self.frame.sub(self.frame)\n # res_mul = self.frame.mul(self.frame)\n # res_div = self.frame.div(2 * self.frame)\n\n # assert_frame_equal(res_add, self.frame + self.frame)\n # assert_frame_equal(res_sub, self.frame - self.frame)\n # assert_frame_equal(res_mul, self.frame * self.frame)\n # assert_frame_equal(res_div, self.frame / (2 * self.frame))\n\n const_add = self.frame.add(1)\n assert_frame_equal(const_add, self.frame + 1)\n\n # corner cases\n result = self.frame.add(self.frame[:0])\n assert_frame_equal(result, self.frame * np.nan)\n\n result = self.frame[:0].add(self.frame)\n assert_frame_equal(result, self.frame * np.nan)\n with assertRaisesRegexp(NotImplementedError, 'fill_value'):\n self.frame.add(self.frame.irow(0), fill_value=3)\n with assertRaisesRegexp(NotImplementedError, 'fill_value'):\n self.frame.add(self.frame.irow(0), axis='index', fill_value=3)\n\n def test_binary_ops_align(self):\n\n # test aligning binary ops\n\n # GH 6681\n index=MultiIndex.from_product([list('abc'),\n ['one','two','three'],\n [1,2,3]],\n names=['first','second','third'])\n\n df = DataFrame(np.arange(27*3).reshape(27,3),\n index=index,\n columns=['value1','value2','value3']).sortlevel()\n\n idx = pd.IndexSlice\n for op in ['add','sub','mul','div','truediv']:\n opa = getattr(operator,op,None)\n if opa is None:\n continue\n\n x = Series([ 1.0, 10.0, 100.0], [1,2,3])\n result = getattr(df,op)(x,level='third',axis=0)\n\n expected = pd.concat([ opa(df.loc[idx[:,:,i],:],v) for i, v in x.iteritems() ]).sortlevel()\n assert_frame_equal(result, expected)\n\n x = Series([ 1.0, 10.0], ['two','three'])\n result = getattr(df,op)(x,level='second',axis=0)\n\n expected = pd.concat([ opa(df.loc[idx[:,i],:],v) for i, v in x.iteritems() ]).reindex_like(df).sortlevel()\n assert_frame_equal(result, expected)\n\n def test_arith_mixed(self):\n\n left = DataFrame({'A': ['a', 'b', 'c'],\n 'B': [1, 2, 3]})\n\n result = left + left\n expected = DataFrame({'A': ['aa', 'bb', 'cc'],\n 'B': [2, 4, 6]})\n assert_frame_equal(result, expected)\n\n def test_arith_getitem_commute(self):\n df = DataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})\n\n self._test_op(df, operator.add)\n self._test_op(df, operator.sub)\n self._test_op(df, operator.mul)\n self._test_op(df, operator.truediv)\n self._test_op(df, operator.floordiv)\n self._test_op(df, operator.pow)\n\n self._test_op(df, lambda x, y: y + x)\n self._test_op(df, lambda x, y: y - x)\n self._test_op(df, lambda x, y: y * x)\n self._test_op(df, lambda x, y: y / x)\n self._test_op(df, lambda x, y: y ** x)\n\n self._test_op(df, lambda x, y: x + y)\n self._test_op(df, lambda x, y: x - y)\n self._test_op(df, lambda x, y: x * y)\n self._test_op(df, lambda x, y: x / y)\n self._test_op(df, lambda x, y: x ** y)\n\n @staticmethod\n def _test_op(df, op):\n result = op(df, 1)\n\n if not df.columns.is_unique:\n raise ValueError(\"Only unique columns supported by this test\")\n\n for col in result.columns:\n assert_series_equal(result[col], op(df[col], 1))\n\n def test_bool_flex_frame(self):\n data = np.random.randn(5, 3)\n other_data = np.random.randn(5, 3)\n df = DataFrame(data)\n other = DataFrame(other_data)\n ndim_5 = np.ones(df.shape + (1, 3))\n\n # Unaligned\n def _check_unaligned_frame(meth, op, df, other):\n part_o = other.ix[3:, 
1:].copy()\n rs = meth(part_o)\n xp = op(df, part_o.reindex(index=df.index, columns=df.columns))\n assert_frame_equal(rs, xp)\n\n # DataFrame\n self.assertTrue(df.eq(df).values.all())\n self.assertFalse(df.ne(df).values.any())\n for op in ['eq', 'ne', 'gt', 'lt', 'ge', 'le']:\n f = getattr(df, op)\n o = getattr(operator, op)\n # No NAs\n assert_frame_equal(f(other), o(df, other))\n _check_unaligned_frame(f, o, df, other)\n # ndarray\n assert_frame_equal(f(other.values), o(df, other.values))\n # scalar\n assert_frame_equal(f(0), o(df, 0))\n # NAs\n assert_frame_equal(f(np.nan), o(df, np.nan))\n with assertRaisesRegexp(ValueError, 'shape'):\n f(ndim_5)\n\n # Series\n def _test_seq(df, idx_ser, col_ser):\n idx_eq = df.eq(idx_ser, axis=0)\n col_eq = df.eq(col_ser)\n idx_ne = df.ne(idx_ser, axis=0)\n col_ne = df.ne(col_ser)\n assert_frame_equal(col_eq, df == Series(col_ser))\n assert_frame_equal(col_eq, -col_ne)\n assert_frame_equal(idx_eq, -idx_ne)\n assert_frame_equal(idx_eq, df.T.eq(idx_ser).T)\n assert_frame_equal(col_eq, df.eq(list(col_ser)))\n assert_frame_equal(idx_eq, df.eq(Series(idx_ser), axis=0))\n assert_frame_equal(idx_eq, df.eq(list(idx_ser), axis=0))\n\n idx_gt = df.gt(idx_ser, axis=0)\n col_gt = df.gt(col_ser)\n idx_le = df.le(idx_ser, axis=0)\n col_le = df.le(col_ser)\n\n assert_frame_equal(col_gt, df > Series(col_ser))\n assert_frame_equal(col_gt, -col_le)\n assert_frame_equal(idx_gt, -idx_le)\n assert_frame_equal(idx_gt, df.T.gt(idx_ser).T)\n\n idx_ge = df.ge(idx_ser, axis=0)\n col_ge = df.ge(col_ser)\n idx_lt = df.lt(idx_ser, axis=0)\n col_lt = df.lt(col_ser)\n assert_frame_equal(col_ge, df >= Series(col_ser))\n assert_frame_equal(col_ge, -col_lt)\n assert_frame_equal(idx_ge, -idx_lt)\n assert_frame_equal(idx_ge, df.T.ge(idx_ser).T)\n\n idx_ser = Series(np.random.randn(5))\n col_ser = Series(np.random.randn(3))\n _test_seq(df, idx_ser, col_ser)\n\n\n # list/tuple\n _test_seq(df, idx_ser.values, col_ser.values)\n\n # NA\n df.ix[0, 0] = np.nan\n rs = df.eq(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.ne(df)\n self.assertTrue(rs.ix[0, 0])\n rs = df.gt(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.lt(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.ge(df)\n self.assertFalse(rs.ix[0, 0])\n rs = df.le(df)\n self.assertFalse(rs.ix[0, 0])\n\n\n\n # complex\n arr = np.array([np.nan, 1, 6, np.nan])\n arr2 = np.array([2j, np.nan, 7, None])\n df = DataFrame({'a': arr})\n df2 = DataFrame({'a': arr2})\n rs = df.gt(df2)\n self.assertFalse(rs.values.any())\n rs = df.ne(df2)\n self.assertTrue(rs.values.all())\n\n arr3 = np.array([2j, np.nan, None])\n df3 = DataFrame({'a': arr3})\n rs = df3.gt(2j)\n self.assertFalse(rs.values.any())\n\n # corner, dtype=object\n df1 = DataFrame({'col': ['foo', np.nan, 'bar']})\n df2 = DataFrame({'col': ['foo', datetime.now(), 'bar']})\n result = df1.ne(df2)\n exp = DataFrame({'col': [False, True, False]})\n assert_frame_equal(result, exp)\n\n def test_arith_flex_series(self):\n df = self.simple\n\n row = df.xs('a')\n col = df['two']\n # after arithmetic refactor, add truediv here\n ops = ['add', 'sub', 'mul', 'mod']\n for op in ops:\n f = getattr(df, op)\n op = getattr(operator, op)\n assert_frame_equal(f(row), op(df, row))\n assert_frame_equal(f(col, axis=0), op(df.T, col).T)\n\n # special case for some reason\n assert_frame_equal(df.add(row, axis=None), df + row)\n\n # cases which will be refactored after big arithmetic refactor\n assert_frame_equal(df.div(row), df / row)\n assert_frame_equal(df.div(col, axis=0), (df.T / col).T)\n\n # broadcasting issue in 
GH7325\n df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='int64')\n expected = DataFrame([[np.inf,np.inf],[1.0,1.5],[1.0,1.25]])\n result = df.div(df[0],axis='index')\n assert_frame_equal(result,expected)\n\n df = DataFrame(np.arange(3*2).reshape((3,2)),dtype='float64')\n expected = DataFrame([[np.nan,np.inf],[1.0,1.5],[1.0,1.25]])\n result = df.div(df[0],axis='index')\n assert_frame_equal(result,expected)\n\n def test_arith_non_pandas_object(self):\n df = self.simple\n\n val1 = df.xs('a').values\n added = DataFrame(df.values + val1, index=df.index, columns=df.columns)\n assert_frame_equal(df + val1, added)\n\n added = DataFrame((df.values.T + val1).T,\n index=df.index, columns=df.columns)\n assert_frame_equal(df.add(val1, axis=0), added)\n\n val2 = list(df['two'])\n\n added = DataFrame(df.values + val2, index=df.index, columns=df.columns)\n assert_frame_equal(df + val2, added)\n\n added = DataFrame((df.values.T + val2).T, index=df.index,\n columns=df.columns)\n assert_frame_equal(df.add(val2, axis='index'), added)\n\n val3 = np.random.rand(*df.shape)\n added = DataFrame(df.values + val3, index=df.index, columns=df.columns)\n assert_frame_equal(df.add(val3), added)\n\n def test_combineFrame(self):\n frame_copy = self.frame.reindex(self.frame.index[::2])\n\n del frame_copy['D']\n frame_copy['C'][:5] = nan\n\n added = self.frame + frame_copy\n tm.assert_dict_equal(added['A'].valid(),\n self.frame['A'] * 2,\n compare_keys=False)\n\n self.assertTrue(np.isnan(added['C'].reindex(frame_copy.index)[:5]).all())\n\n # assert(False)\n\n self.assertTrue(np.isnan(added['D']).all())\n\n self_added = self.frame + self.frame\n self.assertTrue(self_added.index.equals(self.frame.index))\n\n added_rev = frame_copy + self.frame\n self.assertTrue(np.isnan(added['D']).all())\n\n # corner cases\n\n # empty\n plus_empty = self.frame + self.empty\n self.assertTrue(np.isnan(plus_empty.values).all())\n\n empty_plus = self.empty + self.frame\n self.assertTrue(np.isnan(empty_plus.values).all())\n\n empty_empty = self.empty + self.empty\n self.assertTrue(empty_empty.empty)\n\n # out of order\n reverse = self.frame.reindex(columns=self.frame.columns[::-1])\n\n assert_frame_equal(reverse + self.frame, self.frame * 2)\n\n # mix vs float64, upcast\n added = self.frame + self.mixed_float\n _check_mixed_float(added, dtype = 'float64')\n added = self.mixed_float + self.frame\n _check_mixed_float(added, dtype = 'float64')\n\n # mix vs mix\n added = self.mixed_float + self.mixed_float2\n _check_mixed_float(added, dtype = dict(C = None))\n added = self.mixed_float2 + self.mixed_float\n _check_mixed_float(added, dtype = dict(C = None))\n\n # with int\n added = self.frame + self.mixed_int\n _check_mixed_float(added, dtype = 'float64')\n\n def test_combineSeries(self):\n\n # Series\n series = self.frame.xs(self.frame.index[0])\n\n added = self.frame + series\n\n for key, s in compat.iteritems(added):\n assert_series_equal(s, self.frame[key] + series[key])\n\n larger_series = series.to_dict()\n larger_series['E'] = 1\n larger_series = Series(larger_series)\n larger_added = self.frame + larger_series\n\n for key, s in compat.iteritems(self.frame):\n assert_series_equal(larger_added[key], s + series[key])\n self.assertIn('E', larger_added)\n self.assertTrue(np.isnan(larger_added['E']).all())\n\n # vs mix (upcast) as needed\n added = self.mixed_float + series\n _check_mixed_float(added, dtype = 'float64')\n added = self.mixed_float + series.astype('float32')\n _check_mixed_float(added, dtype = dict(C = None))\n added = 
self.mixed_float + series.astype('float16')\n _check_mixed_float(added, dtype = dict(C = None))\n\n #### these raise with numexpr.....as we are adding an int64 to an uint64....weird\n # vs int\n #added = self.mixed_int + (100*series).astype('int64')\n #_check_mixed_int(added, dtype = dict(A = 'int64', B = 'float64', C = 'int64', D = 'int64'))\n #added = self.mixed_int + (100*series).astype('int32')\n #_check_mixed_int(added, dtype = dict(A = 'int32', B = 'float64', C = 'int32', D = 'int64'))\n\n # TimeSeries\n buf = StringIO()\n tmp = sys.stderr\n sys.stderr = buf\n\n try:\n ts = self.tsframe['A']\n added = self.tsframe + ts\n\n for key, col in compat.iteritems(self.tsframe):\n assert_series_equal(added[key], col + ts)\n\n smaller_frame = self.tsframe[:-5]\n smaller_added = smaller_frame + ts\n\n self.assertTrue(smaller_added.index.equals(self.tsframe.index))\n\n smaller_ts = ts[:-5]\n smaller_added2 = self.tsframe + smaller_ts\n assert_frame_equal(smaller_added, smaller_added2)\n\n # length 0\n result = self.tsframe + ts[:0]\n\n # Frame is length 0\n result = self.tsframe[:0] + ts\n self.assertEqual(len(result), 0)\n\n # empty but with non-empty index\n frame = self.tsframe[:1].reindex(columns=[])\n result = frame * ts\n self.assertEqual(len(result), len(ts))\n finally:\n sys.stderr = tmp\n\n def test_combineFunc(self):\n result = self.frame * 2\n self.assert_numpy_array_equal(result.values, self.frame.values * 2)\n\n # vs mix\n result = self.mixed_float * 2\n for c, s in compat.iteritems(result):\n self.assert_numpy_array_equal(s.values, self.mixed_float[c].values * 2)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = self.empty * 2\n self.assertIs(result.index, self.empty.index)\n self.assertEqual(len(result.columns), 0)\n\n def test_comparisons(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame()\n\n row = self.simple.xs('a')\n ndim_5 = np.ones(df1.shape + (1, 1, 1))\n\n def test_comp(func):\n result = func(df1, df2)\n self.assert_numpy_array_equal(result.values,\n func(df1.values, df2.values))\n with assertRaisesRegexp(ValueError, 'Wrong number of dimensions'):\n func(df1, ndim_5)\n\n result2 = func(self.simple, row)\n self.assert_numpy_array_equal(result2.values,\n func(self.simple.values, row.values))\n\n result3 = func(self.frame, 0)\n self.assert_numpy_array_equal(result3.values,\n func(self.frame.values, 0))\n\n\n with assertRaisesRegexp(ValueError, 'Can only compare '\n 'identically-labeled DataFrame'):\n func(self.simple, self.simple[:2])\n\n test_comp(operator.eq)\n test_comp(operator.ne)\n test_comp(operator.lt)\n test_comp(operator.gt)\n test_comp(operator.ge)\n test_comp(operator.le)\n\n def test_string_comparison(self):\n df = DataFrame([{\"a\": 1, \"b\": \"foo\"}, {\"a\": 2, \"b\": \"bar\"}])\n mask_a = df.a > 1\n assert_frame_equal(df[mask_a], df.ix[1:1, :])\n assert_frame_equal(df[-mask_a], df.ix[0:0, :])\n\n mask_b = df.b == \"foo\"\n assert_frame_equal(df[mask_b], df.ix[0:0, :])\n assert_frame_equal(df[-mask_b], df.ix[1:1, :])\n\n def test_float_none_comparison(self):\n df = DataFrame(np.random.randn(8, 3), index=lrange(8),\n columns=['A', 'B', 'C'])\n\n self.assertRaises(TypeError, df.__eq__, None)\n\n def test_boolean_comparison(self):\n\n # GH 4576\n # boolean comparisons with a tuple/list give unexpected results\n df = DataFrame(np.arange(6).reshape((3,2)))\n b = np.array([2, 2])\n b_r = np.atleast_2d([2,2])\n b_c = b_r.T\n l = (2,2,2)\n tup = tuple(l)\n\n # gt\n expected = DataFrame([[False,False],[False,True],[True,True]])\n 
result = df>b\n assert_frame_equal(result,expected)\n\n result = df.values>b\n assert_array_equal(result,expected.values)\n\n result = df>l\n assert_frame_equal(result,expected)\n\n result = df>tup\n assert_frame_equal(result,expected)\n\n result = df>b_r\n assert_frame_equal(result,expected)\n\n result = df.values>b_r\n assert_array_equal(result,expected.values)\n\n self.assertRaises(ValueError, df.__gt__, b_c)\n self.assertRaises(ValueError, df.values.__gt__, b_c)\n\n # ==\n expected = DataFrame([[False,False],[True,False],[False,False]])\n result = df == b\n assert_frame_equal(result,expected)\n\n result = df==l\n assert_frame_equal(result,expected)\n\n result = df==tup\n assert_frame_equal(result,expected)\n\n result = df == b_r\n assert_frame_equal(result,expected)\n\n result = df.values == b_r\n assert_array_equal(result,expected.values)\n\n self.assertRaises(ValueError, lambda : df == b_c)\n self.assertFalse((df.values == b_c))\n\n # with alignment\n df = DataFrame(np.arange(6).reshape((3,2)),columns=list('AB'),index=list('abc'))\n expected.index=df.index\n expected.columns=df.columns\n\n result = df==l\n assert_frame_equal(result,expected)\n\n result = df==tup\n assert_frame_equal(result,expected)\n\n # not shape compatible\n self.assertRaises(ValueError, lambda : df == (2,2))\n self.assertRaises(ValueError, lambda : df == [2,2])\n\n def test_to_csv_deprecated_options(self):\n\n pname = '__tmp_to_csv_deprecated_options__'\n with ensure_clean(pname) as path:\n\n self.tsframe[1:3] = np.nan\n self.tsframe.to_csv(path, nanRep='foo')\n recons = read_csv(path,index_col=0,parse_dates=[0],na_values=['foo'])\n assert_frame_equal(self.tsframe, recons)\n\n with tm.assert_produces_warning(FutureWarning):\n self.frame.to_csv(path, cols=['A', 'B'])\n\n with tm.assert_produces_warning(False):\n self.frame.to_csv(path, columns=['A', 'B'])\n\n\n def test_to_csv_from_csv(self):\n\n pname = '__tmp_to_csv_from_csv__'\n with ensure_clean(pname) as path:\n\n self.frame['A'][:5] = nan\n\n self.frame.to_csv(path)\n self.frame.to_csv(path, columns=['A', 'B'])\n self.frame.to_csv(path, header=False)\n self.frame.to_csv(path, index=False)\n\n # test roundtrip\n self.tsframe.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n assert_frame_equal(self.tsframe, recons)\n\n self.tsframe.to_csv(path, index_label='index')\n recons = DataFrame.from_csv(path, index_col=None)\n assert(len(recons.columns) == len(self.tsframe.columns) + 1)\n\n # no index\n self.tsframe.to_csv(path, index=False)\n recons = DataFrame.from_csv(path, index_col=None)\n assert_almost_equal(self.tsframe.values, recons.values)\n\n # corner case\n dm = DataFrame({'s1': Series(lrange(3), lrange(3)),\n 's2': Series(lrange(2), lrange(2))})\n dm.to_csv(path)\n recons = DataFrame.from_csv(path)\n assert_frame_equal(dm, recons)\n\n with ensure_clean(pname) as path:\n\n # duplicate index\n df = DataFrame(np.random.randn(3, 3), index=['a', 'a', 'b'],\n columns=['x', 'y', 'z'])\n df.to_csv(path)\n result = DataFrame.from_csv(path)\n assert_frame_equal(result, df)\n\n midx = MultiIndex.from_tuples([('A', 1, 2), ('A', 1, 2), ('B', 1, 2)])\n df = DataFrame(np.random.randn(3, 3), index=midx,\n columns=['x', 'y', 'z'])\n df.to_csv(path)\n result = DataFrame.from_csv(path, index_col=[0, 1, 2],\n parse_dates=False)\n assert_frame_equal(result, df, check_names=False) # TODO from_csv names index ['Unnamed: 1', 'Unnamed: 2'] should it ?\n\n # column aliases\n col_aliases = Index(['AA', 'X', 'Y', 'Z'])\n self.frame2.to_csv(path, header=col_aliases)\n rs = 
DataFrame.from_csv(path)\n xp = self.frame2.copy()\n xp.columns = col_aliases\n\n assert_frame_equal(xp, rs)\n\n self.assertRaises(ValueError, self.frame2.to_csv, path,\n header=['AA', 'X'])\n\n with ensure_clean(pname) as path:\n import pandas as pd\n df1 = DataFrame(np.random.randn(3, 1))\n df2 = DataFrame(np.random.randn(3, 1))\n\n df1.to_csv(path)\n df2.to_csv(path,mode='a',header=False)\n xp = pd.concat([df1,df2])\n rs = pd.read_csv(path,index_col=0)\n rs.columns = lmap(int,rs.columns)\n xp.columns = lmap(int,xp.columns)\n assert_frame_equal(xp,rs)\n\n def test_to_csv_cols_reordering(self):\n # GH3454\n import pandas as pd\n\n def _check_df(df,cols=None):\n with ensure_clean() as path:\n df.to_csv(path,columns = cols,engine='python')\n rs_p = pd.read_csv(path,index_col=0)\n df.to_csv(path,columns = cols,chunksize=chunksize)\n rs_c = pd.read_csv(path,index_col=0)\n\n if cols:\n df = df[cols]\n assert (rs_c.columns==rs_p.columns).all()\n assert_frame_equal(df,rs_c,check_names=False)\n\n chunksize=5\n N = int(chunksize*2.5)\n\n df= mkdf(N, 3)\n cs = df.columns\n cols = [cs[2],cs[0]]\n _check_df(df,cols)\n\n def test_to_csv_legacy_raises_on_dupe_cols(self):\n df= mkdf(10, 3)\n df.columns = ['a','a','b']\n with ensure_clean() as path:\n self.assertRaises(NotImplementedError,df.to_csv,path,engine='python')\n\n def test_to_csv_new_dupe_cols(self):\n import pandas as pd\n def _check_df(df,cols=None):\n with ensure_clean() as path:\n df.to_csv(path,columns = cols,chunksize=chunksize)\n rs_c = pd.read_csv(path,index_col=0)\n\n # we wrote them in a different order\n # so compare them in that order\n if cols is not None:\n\n if df.columns.is_unique:\n rs_c.columns = cols\n else:\n indexer, missing = df.columns.get_indexer_non_unique(cols)\n rs_c.columns = df.columns.take(indexer)\n\n for c in cols:\n obj_df = df[c]\n obj_rs = rs_c[c]\n if isinstance(obj_df,Series):\n assert_series_equal(obj_df,obj_rs)\n else:\n assert_frame_equal(obj_df,obj_rs,check_names=False)\n\n # wrote in the same order\n else:\n rs_c.columns = df.columns\n assert_frame_equal(df,rs_c,check_names=False)\n\n chunksize=5\n N = int(chunksize*2.5)\n\n # dupe cols\n df= mkdf(N, 3)\n df.columns = ['a','a','b']\n _check_df(df,None)\n\n # dupe cols with selection\n cols = ['b','a']\n _check_df(df,cols)\n\n @slow\n def test_to_csv_moar(self):\n path = '__tmp_to_csv_moar__'\n\n def _do_test(df,path,r_dtype=None,c_dtype=None,rnlvl=None,cnlvl=None,\n dupe_col=False):\n\n kwargs = dict(parse_dates=False)\n if cnlvl:\n if rnlvl is not None:\n kwargs['index_col'] = lrange(rnlvl)\n kwargs['header'] = lrange(cnlvl)\n with ensure_clean(path) as path:\n df.to_csv(path,encoding='utf8',chunksize=chunksize,tupleize_cols=False)\n recons = DataFrame.from_csv(path,tupleize_cols=False,**kwargs)\n else:\n kwargs['header'] = 0\n with ensure_clean(path) as path:\n df.to_csv(path,encoding='utf8',chunksize=chunksize)\n recons = DataFrame.from_csv(path,**kwargs)\n\n def _to_uni(x):\n if not isinstance(x, compat.text_type):\n return x.decode('utf8')\n return x\n if dupe_col:\n # read_Csv disambiguates the columns by\n # labeling them dupe.1,dupe.2, etc'. 
monkey patch columns\n recons.columns = df.columns\n if rnlvl and not cnlvl:\n delta_lvl = [recons.icol(i).values for i in range(rnlvl-1)]\n ix=MultiIndex.from_arrays([list(recons.index)]+delta_lvl)\n recons.index = ix\n recons = recons.iloc[:,rnlvl-1:]\n\n type_map = dict(i='i',f='f',s='O',u='O',dt='O',p='O')\n if r_dtype:\n if r_dtype == 'u': # unicode\n r_dtype='O'\n recons.index = np.array(lmap(_to_uni,recons.index),\n dtype=r_dtype)\n df.index = np.array(lmap(_to_uni,df.index),dtype=r_dtype)\n elif r_dtype == 'dt': # unicode\n r_dtype='O'\n recons.index = np.array(lmap(Timestamp,recons.index),\n dtype=r_dtype)\n df.index = np.array(lmap(Timestamp,df.index),dtype=r_dtype)\n elif r_dtype == 'p':\n r_dtype='O'\n recons.index = np.array(list(map(Timestamp,\n recons.index.to_datetime())),\n dtype=r_dtype)\n df.index = np.array(list(map(Timestamp,\n df.index.to_datetime())),\n dtype=r_dtype)\n else:\n r_dtype= type_map.get(r_dtype)\n recons.index = np.array(recons.index,dtype=r_dtype )\n df.index = np.array(df.index,dtype=r_dtype )\n if c_dtype:\n if c_dtype == 'u':\n c_dtype='O'\n recons.columns = np.array(lmap(_to_uni,recons.columns),\n dtype=c_dtype)\n df.columns = np.array(lmap(_to_uni,df.columns),dtype=c_dtype )\n elif c_dtype == 'dt':\n c_dtype='O'\n recons.columns = np.array(lmap(Timestamp,recons.columns),\n dtype=c_dtype )\n df.columns = np.array(lmap(Timestamp,df.columns),dtype=c_dtype)\n elif c_dtype == 'p':\n c_dtype='O'\n recons.columns = np.array(lmap(Timestamp,recons.columns.to_datetime()),\n dtype=c_dtype)\n df.columns = np.array(lmap(Timestamp,df.columns.to_datetime()),dtype=c_dtype )\n else:\n c_dtype= type_map.get(c_dtype)\n recons.columns = np.array(recons.columns,dtype=c_dtype )\n df.columns = np.array(df.columns,dtype=c_dtype )\n\n assert_frame_equal(df,recons,check_names=False,check_less_precise=True)\n\n N = 100\n chunksize=1000\n\n # GH3437\n from pandas import NaT\n def make_dtnat_arr(n,nnat=None):\n if nnat is None:\n nnat= int(n*0.1) # 10%\n s=list(date_range('2000',freq='5min',periods=n))\n if nnat:\n for i in np.random.randint(0,len(s),nnat):\n s[i] = NaT\n i = np.random.randint(100)\n s[-i] = NaT\n s[i] = NaT\n return s\n\n # N=35000\n s1=make_dtnat_arr(chunksize+5)\n s2=make_dtnat_arr(chunksize+5,0)\n path = '1.csv'\n\n # s3=make_dtnjat_arr(chunksize+5,0)\n with ensure_clean('.csv') as pth:\n df=DataFrame(dict(a=s1,b=s2))\n df.to_csv(pth,chunksize=chunksize)\n recons = DataFrame.from_csv(pth).convert_objects('coerce')\n assert_frame_equal(df, recons,check_names=False,check_less_precise=True)\n\n for ncols in [4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type='dt',\n c_idx_type='s'),path, 'dt','s')\n\n\n for ncols in [4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type='dt',\n c_idx_type='s'),path, 'dt','s')\n pass\n\n for r_idx_type,c_idx_type in [('i','i'),('s','s'),('u','dt'),('p','p')]:\n for ncols in [1,2,3,4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [2,10,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_type=r_idx_type,\n c_idx_type=c_idx_type),path,r_idx_type,c_idx_type)\n\n for ncols in [1,2,3,4]:\n base = int((chunksize// ncols or 1) or 1)\n for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n 
_do_test(mkdf(nrows, ncols),path)\n\n for nrows in [10,N-2,N-1,N,N+1,N+2]:\n df = mkdf(nrows, 3)\n cols = list(df.columns)\n cols[:2] = [\"dupe\",\"dupe\"]\n cols[-2:] = [\"dupe\",\"dupe\"]\n ix = list(df.index)\n ix[:2] = [\"rdupe\",\"rdupe\"]\n ix[-2:] = [\"rdupe\",\"rdupe\"]\n df.index=ix\n df.columns=cols\n _do_test(df,path,dupe_col=True)\n\n\n _do_test(DataFrame(index=lrange(10)),path)\n _do_test(mkdf(chunksize//2+1, 2,r_idx_nlevels=2),path,rnlvl=2)\n for ncols in [2,3,4]:\n base = int(chunksize//ncols)\n for nrows in [10,N-2,N-1,N,N+1,N+2,2*N-2,2*N-1,2*N,2*N+1,2*N+2,\n base-1,base,base+1]:\n _do_test(mkdf(nrows, ncols,r_idx_nlevels=2),path,rnlvl=2)\n _do_test(mkdf(nrows, ncols,c_idx_nlevels=2),path,cnlvl=2)\n _do_test(mkdf(nrows, ncols,r_idx_nlevels=2,c_idx_nlevels=2),\n path,rnlvl=2,cnlvl=2)\n\n def test_to_csv_from_csv_w_some_infs(self):\n\n # test roundtrip with inf, -inf, nan, as full columns and mix\n self.frame['G'] = np.nan\n f = lambda x: [np.inf, np.nan][np.random.rand() < .5]\n self.frame['H'] = self.frame.index.map(f)\n\n with ensure_clean() as path:\n self.frame.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name\n assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)\n\n def test_to_csv_from_csv_w_all_infs(self):\n\n # test roundtrip with inf, -inf, nan, as full columns and mix\n self.frame['E'] = np.inf\n self.frame['F'] = -np.inf\n\n with ensure_clean() as path:\n self.frame.to_csv(path)\n recons = DataFrame.from_csv(path)\n\n assert_frame_equal(self.frame, recons, check_names=False) # TODO to_csv drops column name\n assert_frame_equal(np.isinf(self.frame), np.isinf(recons), check_names=False)\n\n def test_to_csv_no_index(self):\n # GH 3624, after appending columns, to_csv fails\n pname = '__tmp_to_csv_no_index__'\n with ensure_clean(pname) as path:\n df = DataFrame({'c1':[1,2,3], 'c2':[4,5,6]})\n df.to_csv(path, index=False)\n result = read_csv(path)\n assert_frame_equal(df,result)\n df['c3'] = Series([7,8,9],dtype='int64')\n df.to_csv(path, index=False)\n result = read_csv(path)\n assert_frame_equal(df,result)\n\n def test_to_csv_headers(self):\n # GH6186, the presence or absence of `index` incorrectly\n # causes to_csv to have different header semantics.\n pname = '__tmp_to_csv_headers__'\n from_df = DataFrame([[1, 2], [3, 4]], columns=['A', 'B'])\n to_df = DataFrame([[1, 2], [3, 4]], columns=['X', 'Y'])\n with ensure_clean(pname) as path:\n from_df.to_csv(path, header=['X', 'Y'])\n recons = DataFrame.from_csv(path)\n assert_frame_equal(to_df, recons)\n\n from_df.to_csv(path, index=False, header=['X', 'Y'])\n recons = DataFrame.from_csv(path)\n recons.reset_index(inplace=True)\n assert_frame_equal(to_df, recons)\n\n def test_to_csv_multiindex(self):\n\n pname = '__tmp_to_csv_multiindex__'\n frame = self.frame\n old_index = frame.index\n arrays = np.arange(len(old_index) * 2).reshape(2, -1)\n new_index = MultiIndex.from_arrays(arrays, names=['first', 'second'])\n frame.index = new_index\n\n with ensure_clean(pname) as path:\n\n frame.to_csv(path, header=False)\n frame.to_csv(path, columns=['A', 'B'])\n\n # round trip\n frame.to_csv(path)\n df = DataFrame.from_csv(path, index_col=[0, 1], parse_dates=False)\n\n assert_frame_equal(frame, df, check_names=False) # TODO to_csv drops column name\n self.assertEqual(frame.index.names, df.index.names)\n self.frame.index = old_index # needed if setUP becomes a classmethod\n\n # try multiindex with dates\n tsframe = 
self.tsframe\n old_index = tsframe.index\n new_index = [old_index, np.arange(len(old_index))]\n tsframe.index = MultiIndex.from_arrays(new_index)\n\n tsframe.to_csv(path, index_label=['time', 'foo'])\n recons = DataFrame.from_csv(path, index_col=[0, 1])\n assert_frame_equal(tsframe, recons, check_names=False) # TODO to_csv drops column name\n\n # do not load index\n tsframe.to_csv(path)\n recons = DataFrame.from_csv(path, index_col=None)\n np.testing.assert_equal(len(recons.columns), len(tsframe.columns) + 2)\n\n # no index\n tsframe.to_csv(path, index=False)\n recons = DataFrame.from_csv(path, index_col=None)\n assert_almost_equal(recons.values, self.tsframe.values)\n self.tsframe.index = old_index # needed if setUP becomes classmethod\n\n with ensure_clean(pname) as path:\n # GH3571, GH1651, GH3141\n\n def _make_frame(names=None):\n if names is True:\n names = ['first','second']\n return DataFrame(np.random.randint(0,10,size=(3,3)),\n columns=MultiIndex.from_tuples([('bah', 'foo'),\n ('bah', 'bar'),\n ('ban', 'baz')],\n names=names),\n dtype='int64')\n\n # column & index are multi-index\n df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=[0,1],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # column is mi\n df = mkdf(5,3,r_idx_nlevels=1,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=0,tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # dup column names?\n df = mkdf(5,3,r_idx_nlevels=3,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1,2,3],index_col=[0,1,2],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # writing with no index\n df = _make_frame()\n df.to_csv(path,tupleize_cols=False,index=False)\n result = read_csv(path,header=[0,1],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # we lose the names here\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False,index=False)\n result = read_csv(path,header=[0,1],tupleize_cols=False)\n self.assertTrue(all([ x is None for x in result.columns.names ]))\n result.columns.names = df.columns.names\n assert_frame_equal(df,result)\n\n # tupleize_cols=True and index=False\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=True,index=False)\n result = read_csv(path,header=0,tupleize_cols=True,index_col=None)\n result.columns = df.columns\n assert_frame_equal(df,result)\n\n # whatsnew example\n df = _make_frame()\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False)\n result = read_csv(path,header=[0,1],index_col=[0],tupleize_cols=False)\n assert_frame_equal(df,result)\n\n # column & index are multi-index (compatibility)\n df = mkdf(5,3,r_idx_nlevels=2,c_idx_nlevels=4)\n df.to_csv(path,tupleize_cols=True)\n result = read_csv(path,header=0,index_col=[0,1],tupleize_cols=True)\n result.columns = df.columns\n assert_frame_equal(df,result)\n\n # invalid options\n df = _make_frame(True)\n df.to_csv(path,tupleize_cols=False)\n\n # catch invalid headers\n with assertRaisesRegexp(CParserError, 'Passed header=\\[0,1,2\\] are too many rows for this multi_index of columns'):\n read_csv(path,tupleize_cols=False,header=lrange(3),index_col=0)\n\n with assertRaisesRegexp(CParserError, 'Passed header=\\[0,1,2,3,4,5,6\\], len of 7, but only 6 lines in file'):\n 
read_csv(path,tupleize_cols=False,header=lrange(7),index_col=0)\n\n for i in [4,5,6]:\n with tm.assertRaises(CParserError):\n read_csv(path, tupleize_cols=False, header=lrange(i), index_col=0)\n\n # write with cols\n with assertRaisesRegexp(TypeError, 'cannot specify cols with a MultiIndex'):\n df.to_csv(path, tupleize_cols=False, columns=['foo', 'bar'])\n\n with ensure_clean(pname) as path:\n # empty\n tsframe[:0].to_csv(path)\n recons = DataFrame.from_csv(path)\n exp = tsframe[:0]\n exp.index = []\n\n self.assertTrue(recons.columns.equals(exp.columns))\n self.assertEqual(len(recons), 0)\n\n def test_to_csv_float32_nanrep(self):\n df = DataFrame(np.random.randn(1, 4).astype(np.float32))\n df[1] = np.nan\n\n with ensure_clean('__tmp_to_csv_float32_nanrep__.csv') as path:\n df.to_csv(path, na_rep=999)\n\n with open(path) as f:\n lines = f.readlines()\n self.assertEqual(lines[1].split(',')[2], '999')\n\n def test_to_csv_withcommas(self):\n\n # Commas inside fields should be correctly escaped when saving as CSV.\n df = DataFrame({'A': [1, 2, 3], 'B': ['5,6', '7,8', '9,0']})\n\n with ensure_clean('__tmp_to_csv_withcommas__.csv') as path:\n df.to_csv(path)\n df2 = DataFrame.from_csv(path)\n assert_frame_equal(df2, df)\n\n def test_to_csv_mixed(self):\n\n def create_cols(name):\n return [ \"%s%03d\" % (name,i) for i in range(5) ]\n\n df_float = DataFrame(np.random.randn(100, 5),dtype='float64',columns=create_cols('float'))\n df_int = DataFrame(np.random.randn(100, 5),dtype='int64',columns=create_cols('int'))\n df_bool = DataFrame(True,index=df_float.index,columns=create_cols('bool'))\n df_object = DataFrame('foo',index=df_float.index,columns=create_cols('object'))\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=create_cols('date'))\n\n # add in some nans\n df_float.ix[30:50,1:3] = np.nan\n\n #### this is a bug in read_csv right now ####\n #df_dt.ix[30:50,1:3] = np.nan\n\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)\n\n # dtype\n dtypes = dict()\n for n,dtype in [('float',np.float64),('int',np.int64),('bool',np.bool),('object',np.object)]:\n for c in create_cols(n):\n dtypes[c] = dtype\n\n with ensure_clean() as filename:\n df.to_csv(filename)\n rs = read_csv(filename, index_col=0, dtype=dtypes, parse_dates=create_cols('date'))\n assert_frame_equal(rs, df)\n\n def test_to_csv_dups_cols(self):\n\n df = DataFrame(np.random.randn(1000, 30),columns=lrange(15)+lrange(15),dtype='float64')\n\n with ensure_clean() as filename:\n df.to_csv(filename) # single dtype, fine\n result = read_csv(filename,index_col=0)\n result.columns = df.columns\n assert_frame_equal(result,df)\n\n df_float = DataFrame(np.random.randn(1000, 3),dtype='float64')\n df_int = DataFrame(np.random.randn(1000, 3),dtype='int64')\n df_bool = DataFrame(True,index=df_float.index,columns=lrange(3))\n df_object = DataFrame('foo',index=df_float.index,columns=lrange(3))\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=lrange(3))\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1, ignore_index=True)\n\n cols = []\n for i in range(5):\n cols.extend([0,1,2])\n df.columns = cols\n\n from pandas import to_datetime\n with ensure_clean() as filename:\n df.to_csv(filename)\n result = read_csv(filename,index_col=0)\n\n # date cols\n for i in ['0.4','1.4','2.4']:\n result[i] = to_datetime(result[i])\n\n result.columns = df.columns\n assert_frame_equal(result,df)\n\n # GH3457\n from pandas.util.testing import makeCustomDataframe as mkdf\n\n N=10\n df= mkdf(N, 
3)\n df.columns = ['a','a','b']\n\n with ensure_clean() as filename:\n df.to_csv(filename)\n\n # read_csv will rename the dups columns\n result = read_csv(filename,index_col=0)\n result = result.rename(columns={ 'a.1' : 'a' })\n assert_frame_equal(result,df)\n\n def test_to_csv_chunking(self):\n\n aa=DataFrame({'A':lrange(100000)})\n aa['B'] = aa.A + 1.0\n aa['C'] = aa.A + 2.0\n aa['D'] = aa.A + 3.0\n\n for chunksize in [10000,50000,100000]:\n with ensure_clean() as filename:\n aa.to_csv(filename,chunksize=chunksize)\n rs = read_csv(filename,index_col=0)\n assert_frame_equal(rs, aa)\n\n def test_to_csv_bug(self):\n f1 = StringIO('a,1.0\\nb,2.0')\n df = DataFrame.from_csv(f1, header=None)\n newdf = DataFrame({'t': df[df.columns[0]]})\n\n with ensure_clean() as path:\n newdf.to_csv(path)\n\n recons = read_csv(path, index_col=0)\n assert_frame_equal(recons, newdf, check_names=False) # don't check_names as t != 1\n\n def test_to_csv_unicode(self):\n\n df = DataFrame({u('c/\\u03c3'): [1, 2, 3]})\n with ensure_clean() as path:\n\n df.to_csv(path, encoding='UTF-8')\n df2 = read_csv(path, index_col=0, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n df.to_csv(path, encoding='UTF-8', index=False)\n df2 = read_csv(path, index_col=None, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n def test_to_csv_unicode_index_col(self):\n buf = StringIO('')\n df = DataFrame(\n [[u(\"\\u05d0\"), \"d2\", \"d3\", \"d4\"], [\"a1\", \"a2\", \"a3\", \"a4\"]],\n columns=[u(\"\\u05d0\"),\n u(\"\\u05d1\"), u(\"\\u05d2\"), u(\"\\u05d3\")],\n index=[u(\"\\u05d0\"), u(\"\\u05d1\")])\n\n df.to_csv(buf, encoding='UTF-8')\n buf.seek(0)\n\n df2 = read_csv(buf, index_col=0, encoding='UTF-8')\n assert_frame_equal(df, df2)\n\n def test_to_csv_stringio(self):\n buf = StringIO()\n self.frame.to_csv(buf)\n buf.seek(0)\n recons = read_csv(buf, index_col=0)\n assert_frame_equal(recons, self.frame, check_names=False) # TODO to_csv drops column name\n\n def test_to_csv_float_format(self):\n\n df = DataFrame([[0.123456, 0.234567, 0.567567],\n [12.32112, 123123.2, 321321.2]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n\n with ensure_clean() as filename:\n\n df.to_csv(filename, float_format='%.2f')\n\n rs = read_csv(filename, index_col=0)\n xp = DataFrame([[0.12, 0.23, 0.57],\n [12.32, 123123.20, 321321.20]],\n index=['A', 'B'], columns=['X', 'Y', 'Z'])\n assert_frame_equal(rs, xp)\n\n def test_to_csv_quoting(self):\n df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})\n\n buf = StringIO()\n df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC)\n\n result = buf.getvalue()\n expected = ('\"A\",\"B\"\\n'\n '1,\"foo\"\\n'\n '2,\"bar\"\\n'\n '3,\"baz\"\\n')\n\n self.assertEqual(result, expected)\n\n # quoting windows line terminators, presents with encoding?\n # #3503\n text = 'a,b,c\\n1,\"test \\r\\n\",3\\n'\n df = pd.read_csv(StringIO(text))\n buf = StringIO()\n df.to_csv(buf, encoding='utf-8', index=False)\n self.assertEqual(buf.getvalue(), text)\n\n def test_to_csv_unicodewriter_quoting(self):\n df = DataFrame({'A': [1, 2, 3], 'B': ['foo', 'bar', 'baz']})\n\n buf = StringIO()\n df.to_csv(buf, index=False, quoting=csv.QUOTE_NONNUMERIC,\n encoding='utf-8')\n\n result = buf.getvalue()\n expected = ('\"A\",\"B\"\\n'\n '1,\"foo\"\\n'\n '2,\"bar\"\\n'\n '3,\"baz\"\\n')\n\n self.assertEqual(result, expected)\n\n def test_to_csv_quote_none(self):\n # GH4328\n df = DataFrame({'A': ['hello', '{\"hello\"}']})\n for encoding in (None, 'utf-8'):\n buf = StringIO()\n df.to_csv(buf, quoting=csv.QUOTE_NONE,\n encoding=encoding, 
index=False)\n result = buf.getvalue()\n expected = 'A\\nhello\\n{\"hello\"}\\n'\n self.assertEqual(result, expected)\n\n def test_to_csv_index_no_leading_comma(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['one', 'two', 'three'])\n\n buf = StringIO()\n df.to_csv(buf, index_label=False)\n expected = ('A,B\\n'\n 'one,1,4\\n'\n 'two,2,5\\n'\n 'three,3,6\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n def test_to_csv_line_terminators(self):\n df = DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},\n index=['one', 'two', 'three'])\n\n buf = StringIO()\n df.to_csv(buf, line_terminator='\\r\\n')\n expected = (',A,B\\r\\n'\n 'one,1,4\\r\\n'\n 'two,2,5\\r\\n'\n 'three,3,6\\r\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n buf = StringIO()\n df.to_csv(buf) # The default line terminator remains \\n\n expected = (',A,B\\n'\n 'one,1,4\\n'\n 'two,2,5\\n'\n 'three,3,6\\n')\n self.assertEqual(buf.getvalue(), expected)\n\n def test_info(self):\n io = StringIO()\n self.frame.info(buf=io)\n self.tsframe.info(buf=io)\n\n frame = DataFrame(np.random.randn(5, 3))\n\n import sys\n sys.stdout = StringIO()\n frame.info()\n frame.info(verbose=False)\n sys.stdout = sys.__stdout__\n\n def test_info_wide(self):\n from pandas import set_option, reset_option\n io = StringIO()\n df = DataFrame(np.random.randn(5, 101))\n df.info(buf=io)\n\n io = StringIO()\n df.info(buf=io, max_cols=101)\n rs = io.getvalue()\n self.assertTrue(len(rs.splitlines()) > 100)\n xp = rs\n\n set_option('display.max_info_columns', 101)\n io = StringIO()\n df.info(buf=io)\n self.assertEqual(rs, xp)\n reset_option('display.max_info_columns')\n\n def test_info_duplicate_columns(self):\n io = StringIO()\n\n # it works!\n frame = DataFrame(np.random.randn(1500, 4),\n columns=['a', 'a', 'b', 'b'])\n frame.info(buf=io)\n\n def test_info_shows_column_dtypes(self):\n dtypes = ['int64', 'float64', 'datetime64[ns]', 'timedelta64[ns]',\n 'complex128', 'object', 'bool']\n data = {}\n n = 10\n for i, dtype in enumerate(dtypes):\n data[i] = np.random.randint(2, size=n).astype(dtype)\n df = DataFrame(data)\n buf = StringIO()\n df.info(buf=buf)\n res = buf.getvalue()\n for i, dtype in enumerate(dtypes):\n name = '%d %d non-null %s' % (i, n, dtype)\n assert name in res\n\n def test_info_max_cols(self):\n df = DataFrame(np.random.randn(10, 5))\n for len_, verbose in [(4, None), (4, False), (9, True)]:\n # For verbose always ^ setting ^ summarize ^ full output\n with pd.option_context('max_info_columns', 4):\n buf = StringIO()\n df.info(buf=buf, verbose=verbose)\n res = buf.getvalue()\n self.assertEqual(len(res.split('\\n')), len_)\n\n for len_, verbose in [(9, None), (4, False), (9, True)]:\n\n # max_cols no exceeded\n with pd.option_context('max_info_columns', 5):\n buf = StringIO()\n df.info(buf=buf, verbose=verbose)\n res = buf.getvalue()\n self.assertEqual(len(res.split('\\n')), len_)\n\n for len_, max_cols in [(9, 5), (4, 4)]:\n # setting truncates\n with pd.option_context('max_info_columns', 4):\n buf = StringIO()\n df.info(buf=buf, max_cols=max_cols)\n res = buf.getvalue()\n self.assertEqual(len(res.split('\\n')), len_)\n\n # setting wouldn't truncate\n with pd.option_context('max_info_columns', 5):\n buf = StringIO()\n df.info(buf=buf, max_cols=max_cols)\n res = buf.getvalue()\n self.assertEqual(len(res.split('\\n')), len_)\n\n\n def test_dtypes(self):\n self.mixed_frame['bool'] = self.mixed_frame['A'] > 0\n result = self.mixed_frame.dtypes\n expected = Series(dict((k, v.dtype)\n for k, v in compat.iteritems(self.mixed_frame)),\n 
index=result.index)\n assert_series_equal(result, expected)\n\n def test_convert_objects(self):\n\n oops = self.mixed_frame.T.T\n converted = oops.convert_objects()\n assert_frame_equal(converted, self.mixed_frame)\n self.assertEqual(converted['A'].dtype, np.float64)\n\n # force numeric conversion\n self.mixed_frame['H'] = '1.'\n self.mixed_frame['I'] = '1'\n\n # add in some items that will be nan\n l = len(self.mixed_frame)\n self.mixed_frame['J'] = '1.'\n self.mixed_frame['K'] = '1'\n self.mixed_frame.ix[0:5,['J','K']] = 'garbled'\n converted = self.mixed_frame.convert_objects(convert_numeric=True)\n self.assertEqual(converted['H'].dtype, 'float64')\n self.assertEqual(converted['I'].dtype, 'int64')\n self.assertEqual(converted['J'].dtype, 'float64')\n self.assertEqual(converted['K'].dtype, 'float64')\n self.assertEqual(len(converted['J'].dropna()), l-5)\n self.assertEqual(len(converted['K'].dropna()), l-5)\n\n # via astype\n converted = self.mixed_frame.copy()\n converted['H'] = converted['H'].astype('float64')\n converted['I'] = converted['I'].astype('int64')\n self.assertEqual(converted['H'].dtype, 'float64')\n self.assertEqual(converted['I'].dtype, 'int64')\n\n # via astype, but errors\n converted = self.mixed_frame.copy()\n with assertRaisesRegexp(ValueError, 'invalid literal'):\n converted['H'].astype('int32')\n\n # mixed in a single column\n df = DataFrame(dict(s = Series([1, 'na', 3 ,4])))\n result = df.convert_objects(convert_numeric=True)\n expected = DataFrame(dict(s = Series([1, np.nan, 3 ,4])))\n assert_frame_equal(result, expected)\n\n def test_convert_objects_no_conversion(self):\n mixed1 = DataFrame(\n {'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})\n mixed2 = mixed1.convert_objects()\n assert_frame_equal(mixed1, mixed2)\n\n def test_append_series_dict(self):\n df = DataFrame(np.random.randn(5, 4),\n columns=['foo', 'bar', 'baz', 'qux'])\n\n series = df.ix[4]\n with assertRaisesRegexp(ValueError, 'Indexes have overlapping values'):\n df.append(series, verify_integrity=True)\n series.name = None\n with assertRaisesRegexp(TypeError, 'Can only append a Series if '\n 'ignore_index=True'):\n df.append(series, verify_integrity=True)\n\n result = df.append(series[::-1], ignore_index=True)\n expected = df.append(DataFrame({0: series[::-1]}, index=df.columns).T,\n ignore_index=True)\n assert_frame_equal(result, expected)\n\n # dict\n result = df.append(series.to_dict(), ignore_index=True)\n assert_frame_equal(result, expected)\n\n result = df.append(series[::-1][:3], ignore_index=True)\n expected = df.append(DataFrame({0: series[::-1][:3]}).T,\n ignore_index=True)\n assert_frame_equal(result, expected.ix[:, result.columns])\n\n # can append when name set\n row = df.ix[4]\n row.name = 5\n result = df.append(row)\n expected = df.append(df[-1:], ignore_index=True)\n assert_frame_equal(result, expected)\n\n def test_append_list_of_series_dicts(self):\n df = DataFrame(np.random.randn(5, 4),\n columns=['foo', 'bar', 'baz', 'qux'])\n\n dicts = [x.to_dict() for idx, x in df.iterrows()]\n\n result = df.append(dicts, ignore_index=True)\n expected = df.append(df, ignore_index=True)\n assert_frame_equal(result, expected)\n\n # different columns\n dicts = [{'foo': 1, 'bar': 2, 'baz': 3, 'peekaboo': 4},\n {'foo': 5, 'bar': 6, 'baz': 7, 'peekaboo': 8}]\n result = df.append(dicts, ignore_index=True)\n expected = df.append(DataFrame(dicts), ignore_index=True)\n assert_frame_equal(result, expected)\n\n def test_append_empty_dataframe(self):\n\n # Empty df append empty df\n df1 = 
DataFrame([])\n df2 = DataFrame([])\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Non-empty df append empty df\n df1 = DataFrame(np.random.randn(5, 2))\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Empty df with columns append empty df\n df1 = DataFrame(columns=['bar', 'foo'])\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n # Non-Empty df with columns append empty df\n df1 = DataFrame(np.random.randn(5, 2), columns=['bar', 'foo'])\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n def test_append_dtypes(self):\n\n # GH 5754\n # row appends of different dtypes (so need to do by-item)\n # can sometimes infer the correct type\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(5))\n df2 = DataFrame()\n result = df1.append(df2)\n expected = df1.copy()\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : 'foo' }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : [ Timestamp('20130101'), 'foo' ]})\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : np.nan }, index=lrange(1,2), dtype=object)\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), np.nan ],dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : np.nan }, index=lrange(1))\n df2 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1,2))\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ np.nan, Timestamp('20130101')] ,dtype='M8[ns]') })\n assert_frame_equal(result, expected)\n\n df1 = DataFrame({ 'bar' : Timestamp('20130101') }, index=lrange(1))\n df2 = DataFrame({ 'bar' : 1 }, index=lrange(1,2), dtype=object)\n result = df1.append(df2)\n expected = DataFrame({ 'bar' : Series([ Timestamp('20130101'), 1 ]) })\n assert_frame_equal(result, expected)\n\n def test_asfreq(self):\n offset_monthly = self.tsframe.asfreq(datetools.bmonthEnd)\n rule_monthly = self.tsframe.asfreq('BM')\n\n assert_almost_equal(offset_monthly['A'], rule_monthly['A'])\n\n filled = rule_monthly.asfreq('B', method='pad')\n # TODO: actually check that this worked.\n\n # don't forget!\n filled_dep = rule_monthly.asfreq('B', method='pad')\n\n # test does not blow up on length-0 DataFrame\n zero_length = self.tsframe.reindex([])\n result = zero_length.asfreq('BM')\n self.assertIsNot(result, zero_length)\n\n def test_asfreq_datetimeindex(self):\n df = DataFrame({'A': [1, 2, 3]},\n index=[datetime(2011, 11, 1), datetime(2011, 11, 2),\n datetime(2011, 11, 3)])\n df = df.asfreq('B')\n tm.assert_isinstance(df.index, DatetimeIndex)\n\n ts = df['A'].asfreq('B')\n tm.assert_isinstance(ts.index, DatetimeIndex)\n\n def test_at_time_between_time_datetimeindex(self):\n index = date_range(\"2012-01-01\", \"2012-01-05\", freq='30min')\n df = DataFrame(randn(len(index), 5), index=index)\n akey = time(12, 0, 0)\n bkey = slice(time(13, 0, 0), time(14, 0, 
0))\n ainds = [24, 72, 120, 168]\n binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]\n\n result = df.at_time(akey)\n expected = df.ix[akey]\n expected2 = df.ix[ainds]\n assert_frame_equal(result, expected)\n assert_frame_equal(result, expected2)\n self.assertEqual(len(result), 4)\n\n result = df.between_time(bkey.start, bkey.stop)\n expected = df.ix[bkey]\n expected2 = df.ix[binds]\n assert_frame_equal(result, expected)\n assert_frame_equal(result, expected2)\n self.assertEqual(len(result), 12)\n\n result = df.copy()\n result.ix[akey] = 0\n result = result.ix[akey]\n expected = df.ix[akey].copy()\n expected.ix[:] = 0\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.ix[akey] = 0\n result.ix[akey] = df.ix[ainds]\n assert_frame_equal(result, df)\n\n result = df.copy()\n result.ix[bkey] = 0\n result = result.ix[bkey]\n expected = df.ix[bkey].copy()\n expected.ix[:] = 0\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.ix[bkey] = 0\n result.ix[bkey] = df.ix[binds]\n assert_frame_equal(result, df)\n\n def test_as_matrix(self):\n frame = self.frame\n mat = frame.as_matrix()\n\n frameCols = frame.columns\n for i, row in enumerate(mat):\n for j, value in enumerate(row):\n col = frameCols[j]\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][i]))\n else:\n self.assertEqual(value, frame[col][i])\n\n # mixed type\n mat = self.mixed_frame.as_matrix(['foo', 'A'])\n self.assertEqual(mat[0, 0], 'bar')\n\n df = DataFrame({'real': [1, 2, 3], 'complex': [1j, 2j, 3j]})\n mat = df.as_matrix()\n self.assertEqual(mat[0, 0], 1j)\n\n # single block corner case\n mat = self.frame.as_matrix(['A', 'B'])\n expected = self.frame.reindex(columns=['A', 'B']).values\n assert_almost_equal(mat, expected)\n\n def test_as_matrix_duplicates(self):\n df = DataFrame([[1, 2, 'a', 'b'],\n [1, 2, 'a', 'b']],\n columns=['one', 'one', 'two', 'two'])\n\n result = df.values\n expected = np.array([[1, 2, 'a', 'b'], [1, 2, 'a', 'b']],\n dtype=object)\n\n self.assertTrue(np.array_equal(result, expected))\n\n def test_ftypes(self):\n frame = self.mixed_float\n expected = Series(dict(A = 'float32:dense', B = 'float32:dense', C = 'float16:dense', D = 'float64:dense'))\n expected.sort()\n result = frame.ftypes\n result.sort()\n assert_series_equal(result,expected)\n\n def test_values(self):\n self.frame.values[:, 0] = 5.\n self.assertTrue((self.frame.values[:, 0] == 5).all())\n\n def test_deepcopy(self):\n cp = deepcopy(self.frame)\n series = cp['A']\n series[:] = 10\n for idx, value in compat.iteritems(series):\n self.assertNotEqual(self.frame['A'][idx], value)\n\n def test_copy(self):\n cop = self.frame.copy()\n cop['E'] = cop['A']\n self.assertNotIn('E', self.frame)\n\n # copy objects\n copy = self.mixed_frame.copy()\n self.assertIsNot(copy._data, self.mixed_frame._data)\n\n def _check_method(self, method='pearson', check_minp=False):\n if not check_minp:\n correls = self.frame.corr(method=method)\n exp = self.frame['A'].corr(self.frame['C'], method=method)\n assert_almost_equal(correls['A']['C'], exp)\n else:\n result = self.frame.corr(min_periods=len(self.frame) - 8)\n expected = self.frame.corr()\n expected.ix['A', 'B'] = expected.ix['B', 'A'] = nan\n\n def test_corr_pearson(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('pearson')\n\n def test_corr_kendall(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('kendall')\n\n def 
test_corr_spearman(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n self._check_method('spearman')\n\n def test_corr_non_numeric(self):\n tm._skip_if_no_scipy()\n self.frame['A'][:5] = nan\n self.frame['B'][5:10] = nan\n\n # exclude non-numeric types\n result = self.mixed_frame.corr()\n expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].corr()\n assert_frame_equal(result, expected)\n\n def test_corr_nooverlap(self):\n tm._skip_if_no_scipy()\n\n # nothing in common\n for meth in ['pearson', 'kendall', 'spearman']:\n df = DataFrame({'A': [1, 1.5, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1.5, 1]})\n rs = df.corr(meth)\n self.assertTrue(isnull(rs.ix['A', 'B']))\n self.assertTrue(isnull(rs.ix['B', 'A']))\n self.assertEqual(rs.ix['A', 'A'], 1)\n self.assertEqual(rs.ix['B', 'B'], 1)\n\n def test_corr_constant(self):\n tm._skip_if_no_scipy()\n\n # constant --> all NA\n\n for meth in ['pearson', 'spearman']:\n df = DataFrame({'A': [1, 1, 1, np.nan, np.nan, np.nan],\n 'B': [np.nan, np.nan, np.nan, 1, 1, 1]})\n rs = df.corr(meth)\n self.assertTrue(isnull(rs.values).all())\n\n def test_corr_int(self):\n # dtypes other than float64 #1761\n df3 = DataFrame({\"a\": [1, 2, 3, 4], \"b\": [1, 2, 3, 4]})\n\n # it works!\n df3.cov()\n df3.corr()\n\n def test_cov(self):\n # min_periods no NAs (corner case)\n expected = self.frame.cov()\n result = self.frame.cov(min_periods=len(self.frame))\n\n assert_frame_equal(expected, result)\n\n result = self.frame.cov(min_periods=len(self.frame) + 1)\n self.assertTrue(isnull(result.values).all())\n\n # with NAs\n frame = self.frame.copy()\n frame['A'][:5] = nan\n frame['B'][5:10] = nan\n result = self.frame.cov(min_periods=len(self.frame) - 8)\n expected = self.frame.cov()\n expected.ix['A', 'B'] = np.nan\n expected.ix['B', 'A'] = np.nan\n\n # regular\n self.frame['A'][:5] = nan\n self.frame['B'][:10] = nan\n cov = self.frame.cov()\n\n assert_almost_equal(cov['A']['C'],\n self.frame['A'].cov(self.frame['C']))\n\n # exclude non-numeric types\n result = self.mixed_frame.cov()\n expected = self.mixed_frame.ix[:, ['A', 'B', 'C', 'D']].cov()\n assert_frame_equal(result, expected)\n\n # Single column frame\n df = DataFrame(np.linspace(0.0,1.0,10))\n result = df.cov()\n expected = DataFrame(np.cov(df.values.T).reshape((1,1)),\n index=df.columns,columns=df.columns)\n assert_frame_equal(result, expected)\n df.ix[0] = np.nan\n result = df.cov()\n expected = DataFrame(np.cov(df.values[1:].T).reshape((1,1)),\n index=df.columns,columns=df.columns)\n assert_frame_equal(result, expected)\n\n def test_corrwith(self):\n a = self.tsframe\n noise = Series(randn(len(a)), index=a.index)\n\n b = self.tsframe + noise\n\n # make sure order does not matter\n b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])\n del b['B']\n\n colcorr = a.corrwith(b, axis=0)\n assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))\n\n rowcorr = a.corrwith(b, axis=1)\n assert_series_equal(rowcorr, a.T.corrwith(b.T, axis=0))\n\n dropped = a.corrwith(b, axis=0, drop=True)\n assert_almost_equal(dropped['A'], a['A'].corr(b['A']))\n self.assertNotIn('B', dropped)\n\n dropped = a.corrwith(b, axis=1, drop=True)\n self.assertNotIn(a.index[-1], dropped.index)\n\n # non time-series data\n index = ['a', 'b', 'c', 'd', 'e']\n columns = ['one', 'two', 'three', 'four']\n df1 = DataFrame(randn(5, 4), index=index, columns=columns)\n df2 = DataFrame(randn(4, 4), index=index[:4], columns=columns)\n correls = df1.corrwith(df2, axis=1)\n for row in 
index[:4]:\n assert_almost_equal(correls[row], df1.ix[row].corr(df2.ix[row]))\n\n def test_corrwith_with_objects(self):\n df1 = tm.makeTimeDataFrame()\n df2 = tm.makeTimeDataFrame()\n cols = ['A', 'B', 'C', 'D']\n\n df1['obj'] = 'foo'\n df2['obj'] = 'bar'\n\n result = df1.corrwith(df2)\n expected = df1.ix[:, cols].corrwith(df2.ix[:, cols])\n assert_series_equal(result, expected)\n\n result = df1.corrwith(df2, axis=1)\n expected = df1.ix[:, cols].corrwith(df2.ix[:, cols], axis=1)\n assert_series_equal(result, expected)\n\n def test_corrwith_series(self):\n result = self.tsframe.corrwith(self.tsframe['A'])\n expected = self.tsframe.apply(self.tsframe['A'].corr)\n\n assert_series_equal(result, expected)\n\n def test_corrwith_matches_corrcoef(self):\n df1 = DataFrame(np.arange(10000), columns=['a'])\n df2 = DataFrame(np.arange(10000)**2, columns=['a'])\n c1 = df1.corrwith(df2)['a']\n c2 = np.corrcoef(df1['a'],df2['a'])[0][1]\n\n assert_almost_equal(c1, c2)\n self.assertTrue(c1 < 1)\n\n def test_drop_names(self):\n df = DataFrame([[1, 2, 3],[3, 4, 5],[5, 6, 7]], index=['a', 'b', 'c'],\n columns=['d', 'e', 'f'])\n df.index.name, df.columns.name = 'first', 'second'\n df_dropped_b = df.drop('b')\n df_dropped_e = df.drop('e', axis=1)\n df_inplace_b, df_inplace_e = df.copy(), df.copy()\n df_inplace_b.drop('b', inplace=True)\n df_inplace_e.drop('e', axis=1, inplace=True)\n for obj in (df_dropped_b, df_dropped_e, df_inplace_b, df_inplace_e):\n self.assertEqual(obj.index.name, 'first')\n self.assertEqual(obj.columns.name, 'second')\n self.assertEqual(list(df.columns), ['d', 'e', 'f'])\n\n def test_dropEmptyRows(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n original = Series(mat, index=self.frame.index)\n expected = original.dropna()\n inplace_frame1, inplace_frame2 = frame.copy(), frame.copy()\n\n smaller_frame = frame.dropna(how='all')\n # check that original was preserved\n assert_series_equal(frame['foo'], original)\n inplace_frame1.dropna(how='all', inplace=True)\n assert_series_equal(smaller_frame['foo'], expected)\n assert_series_equal(inplace_frame1['foo'], expected)\n\n smaller_frame = frame.dropna(how='all', subset=['foo'])\n inplace_frame2.dropna(how='all', subset=['foo'], inplace=True)\n assert_series_equal(smaller_frame['foo'], expected)\n assert_series_equal(inplace_frame2['foo'], expected)\n\n def test_dropIncompleteRows(self):\n N = len(self.frame.index)\n mat = randn(N)\n mat[:5] = nan\n\n frame = DataFrame({'foo': mat}, index=self.frame.index)\n frame['bar'] = 5\n original = Series(mat, index=self.frame.index)\n inp_frame1, inp_frame2 = frame.copy(), frame.copy()\n\n smaller_frame = frame.dropna()\n assert_series_equal(frame['foo'], original)\n inp_frame1.dropna(inplace=True)\n self.assert_numpy_array_equal(smaller_frame['foo'], mat[5:])\n self.assert_numpy_array_equal(inp_frame1['foo'], mat[5:])\n\n samesize_frame = frame.dropna(subset=['bar'])\n assert_series_equal(frame['foo'], original)\n self.assertTrue((frame['bar'] == 5).all())\n inp_frame2.dropna(subset=['bar'], inplace=True)\n self.assertTrue(samesize_frame.index.equals(self.frame.index))\n self.assertTrue(inp_frame2.index.equals(self.frame.index))\n\n def test_dropna(self):\n df = DataFrame(np.random.randn(6, 4))\n df[2][:2] = nan\n\n dropped = df.dropna(axis=1)\n expected = df.ix[:, [0, 1, 3]]\n inp = df.copy()\n inp.dropna(axis=1, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = 
df.dropna(axis=0)\n expected = df.ix[lrange(2, 6)]\n inp = df.copy()\n inp.dropna(axis=0, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n # threshold\n dropped = df.dropna(axis=1, thresh=5)\n expected = df.ix[:, [0, 1, 3]]\n inp = df.copy()\n inp.dropna(axis=1, thresh=5, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = df.dropna(axis=0, thresh=4)\n expected = df.ix[lrange(2, 6)]\n inp = df.copy()\n inp.dropna(axis=0, thresh=4, inplace=True)\n assert_frame_equal(dropped, expected)\n assert_frame_equal(inp, expected)\n\n dropped = df.dropna(axis=1, thresh=4)\n assert_frame_equal(dropped, df)\n\n dropped = df.dropna(axis=1, thresh=3)\n assert_frame_equal(dropped, df)\n\n # subset\n dropped = df.dropna(axis=0, subset=[0, 1, 3])\n inp = df.copy()\n inp.dropna(axis=0, subset=[0, 1, 3], inplace=True)\n assert_frame_equal(dropped, df)\n assert_frame_equal(inp, df)\n\n # all\n dropped = df.dropna(axis=1, how='all')\n assert_frame_equal(dropped, df)\n\n df[2] = nan\n dropped = df.dropna(axis=1, how='all')\n expected = df.ix[:, [0, 1, 3]]\n assert_frame_equal(dropped, expected)\n\n # bad input\n self.assertRaises(ValueError, df.dropna, axis=3)\n\n\n def test_drop_and_dropna_caching(self):\n # tst that cacher updates\n original = Series([1, 2, np.nan])\n expected = Series([1, 2], dtype=original.dtype)\n df = pd.DataFrame({'A': original.values.copy()})\n df2 = df.copy()\n df['A'].dropna()\n assert_series_equal(df['A'], original)\n df['A'].dropna(inplace=True)\n assert_series_equal(df['A'], expected)\n df2['A'].drop([1])\n assert_series_equal(df2['A'], original)\n df2['A'].drop([1], inplace=True)\n assert_series_equal(df2['A'], original.drop([1]))\n\n def test_dropna_corner(self):\n # bad input\n self.assertRaises(ValueError, self.frame.dropna, how='foo')\n self.assertRaises(TypeError, self.frame.dropna, how=None)\n\n def test_dropna_multiple_axes(self):\n df = DataFrame([[1, np.nan, 2, 3],\n [4, np.nan, 5, 6],\n [np.nan, np.nan, np.nan, np.nan],\n [7, np.nan, 8, 9]])\n cp = df.copy()\n result = df.dropna(how='all', axis=[0, 1])\n result2 = df.dropna(how='all', axis=(0, 1))\n expected = df.dropna(how='all').dropna(how='all', axis=1)\n\n assert_frame_equal(result, expected)\n assert_frame_equal(result2, expected)\n assert_frame_equal(df, cp)\n\n inp = df.copy()\n inp.dropna(how='all', axis=(0, 1), inplace=True)\n assert_frame_equal(inp, expected)\n\n def test_drop_duplicates(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('AAA')\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('AAA', take_last=True)\n expected = df.ix[[6, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n expected = df.ix[[0, 1, 2, 3]]\n result = df.drop_duplicates(np.array(['AAA', 'B']))\n assert_frame_equal(result, expected)\n result = df.drop_duplicates(['AAA', 'B'])\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(('AAA', 'B'), take_last=True)\n expected = df.ix[[0, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n # consider everything\n df2 = df.ix[:, ['AAA', 'B', 'C']]\n\n result = df2.drop_duplicates()\n # in this case only\n expected = df2.drop_duplicates(['AAA', 'B'])\n assert_frame_equal(result, expected)\n\n result = 
df2.drop_duplicates(take_last=True)\n expected = df2.drop_duplicates(['AAA', 'B'], take_last=True)\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_deprecated_warning(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n expected = df[:2]\n\n # Raises warning\n with tm.assert_produces_warning(False):\n result = df.drop_duplicates(subset='AAA')\n assert_frame_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning):\n result = df.drop_duplicates(cols='AAA')\n assert_frame_equal(result, expected)\n\n # Does not allow both subset and cols\n self.assertRaises(TypeError, df.drop_duplicates,\n kwargs={'cols': 'AAA', 'subset': 'B'})\n\n # Does not allow unknown kwargs\n self.assertRaises(TypeError, df.drop_duplicates,\n kwargs={'subset': 'AAA', 'bad_arg': True})\n\n def test_drop_duplicates_tuple(self):\n df = DataFrame({('AA', 'AB'): ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates(('AA', 'AB'))\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(('AA', 'AB'), take_last=True)\n expected = df.ix[[6, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n expected = df.ix[[0, 1, 2, 3]]\n result = df.drop_duplicates((('AA', 'AB'), 'B'))\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_NA(self):\n # none\n df = DataFrame({'A': [None, None, 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('A')\n expected = df.ix[[0, 2, 3]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('A', take_last=True)\n expected = df.ix[[1, 6, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n result = df.drop_duplicates(['A', 'B'])\n expected = df.ix[[0, 2, 3, 6]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['A', 'B'], take_last=True)\n expected = df.ix[[1, 5, 6, 7]]\n assert_frame_equal(result, expected)\n\n # nan\n df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1.0, np.nan, np.nan, np.nan, 1., 1., 1, 1.],\n 'D': lrange(8)})\n\n # single column\n result = df.drop_duplicates('C')\n expected = df[:2]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates('C', take_last=True)\n expected = df.ix[[3, 7]]\n assert_frame_equal(result, expected)\n\n # multi column\n result = df.drop_duplicates(['C', 'B'])\n expected = df.ix[[0, 1, 2, 4]]\n assert_frame_equal(result, expected)\n\n result = df.drop_duplicates(['C', 'B'], take_last=True)\n expected = df.ix[[1, 3, 6, 7]]\n assert_frame_equal(result, expected)\n\n def test_drop_duplicates_inplace(self):\n orig = DataFrame({'A': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # single column\n df = orig.copy()\n df.drop_duplicates('A', inplace=True)\n expected = orig[:2]\n result = df\n assert_frame_equal(result, expected)\n\n df = orig.copy()\n 
df.drop_duplicates('A', take_last=True, inplace=True)\n expected = orig.ix[[6, 7]]\n result = df\n assert_frame_equal(result, expected)\n\n # multi column\n df = orig.copy()\n df.drop_duplicates(['A', 'B'], inplace=True)\n expected = orig.ix[[0, 1, 2, 3]]\n result = df\n assert_frame_equal(result, expected)\n\n df = orig.copy()\n df.drop_duplicates(['A', 'B'], take_last=True, inplace=True)\n expected = orig.ix[[0, 5, 6, 7]]\n result = df\n assert_frame_equal(result, expected)\n\n # consider everything\n orig2 = orig.ix[:, ['A', 'B', 'C']].copy()\n\n df2 = orig2.copy()\n df2.drop_duplicates(inplace=True)\n # in this case only\n expected = orig2.drop_duplicates(['A', 'B'])\n result = df2\n assert_frame_equal(result, expected)\n\n df2 = orig2.copy()\n df2.drop_duplicates(take_last=True, inplace=True)\n expected = orig2.drop_duplicates(['A', 'B'], take_last=True)\n result = df2\n assert_frame_equal(result, expected)\n\n def test_duplicated_deprecated_warning(self):\n df = DataFrame({'AAA': ['foo', 'bar', 'foo', 'bar',\n 'foo', 'bar', 'bar', 'foo'],\n 'B': ['one', 'one', 'two', 'two',\n 'two', 'two', 'one', 'two'],\n 'C': [1, 1, 2, 2, 2, 2, 1, 2],\n 'D': lrange(8)})\n\n # Raises warning\n with tm.assert_produces_warning(False):\n result = df.duplicated(subset='AAA')\n\n with tm.assert_produces_warning(FutureWarning):\n result = df.duplicated(cols='AAA')\n\n # Does not allow both subset and cols\n self.assertRaises(TypeError, df.duplicated,\n kwargs={'cols': 'AAA', 'subset': 'B'})\n\n # Does not allow unknown kwargs\n self.assertRaises(TypeError, df.duplicated,\n kwargs={'subset': 'AAA', 'bad_arg': True})\n\n def test_drop_col_still_multiindex(self):\n arrays = [['a', 'b', 'c', 'top'],\n ['', '', '', 'OD'],\n ['', '', '', 'wx']]\n\n tuples = sorted(zip(*arrays))\n index = MultiIndex.from_tuples(tuples)\n\n df = DataFrame(randn(3, 4), columns=index)\n del df[('a', '', '')]\n assert(isinstance(df.columns, MultiIndex))\n\n def test_drop(self):\n simple = DataFrame({\"A\": [1, 2, 3, 4], \"B\": [0, 1, 2, 3]})\n assert_frame_equal(simple.drop(\"A\", axis=1), simple[['B']])\n assert_frame_equal(simple.drop([\"A\", \"B\"], axis='columns'),\n simple[[]])\n assert_frame_equal(simple.drop([0, 1, 3], axis=0), simple.ix[[2], :])\n assert_frame_equal(simple.drop([0, 3], axis='index'), simple.ix[[1, 2], :])\n\n #non-unique - wheee!\n nu_df = DataFrame(lzip(range(3), range(-3, 1), list('abc')),\n columns=['a', 'a', 'b'])\n assert_frame_equal(nu_df.drop('a', axis=1), nu_df[['b']])\n assert_frame_equal(nu_df.drop('b', axis='columns'), nu_df['a'])\n\n nu_df = nu_df.set_index(pd.Index(['X', 'Y', 'X']))\n nu_df.columns = list('abc')\n assert_frame_equal(nu_df.drop('X', axis='rows'), nu_df.ix[[\"Y\"], :])\n assert_frame_equal(nu_df.drop(['X', 'Y'], axis=0), nu_df.ix[[], :])\n\n # inplace cache issue\n # GH 5628\n df = pd.DataFrame(np.random.randn(10,3), columns=list('abc'))\n expected = df[~(df.b>0)]\n df.drop(labels=df[df.b>0].index, inplace=True)\n assert_frame_equal(df,expected)\n\n def test_fillna(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n zero_filled = self.tsframe.fillna(0)\n self.assertTrue((zero_filled['A'][:5] == 0).all())\n\n padded = self.tsframe.fillna(method='pad')\n self.assertTrue(np.isnan(padded['A'][:5]).all())\n self.assertTrue((padded['A'][-5:] == padded['A'][-5]).all())\n\n # mixed type\n self.mixed_frame['foo'][5:20] = nan\n self.mixed_frame['A'][-10:] = nan\n result = self.mixed_frame.fillna(value=0)\n result = self.mixed_frame.fillna(method='pad')\n\n 
self.assertRaises(ValueError, self.tsframe.fillna)\n self.assertRaises(ValueError, self.tsframe.fillna, 5, method='ffill')\n\n # mixed numeric (but no float16)\n mf = self.mixed_float.reindex(columns=['A','B','D'])\n mf['A'][-10:] = nan\n result = mf.fillna(value=0)\n _check_mixed_float(result, dtype = dict(C = None))\n\n result = mf.fillna(method='pad')\n _check_mixed_float(result, dtype = dict(C = None))\n\n # empty frame (GH #2778)\n df = DataFrame(columns=['x'])\n for m in ['pad','backfill']:\n df.x.fillna(method=m,inplace=1)\n df.x.fillna(method=m)\n\n # with different dtype (GH3386)\n df = DataFrame([['a','a',np.nan,'a'],['b','b',np.nan,'b'],['c','c',np.nan,'c']])\n\n result = df.fillna({ 2: 'foo' })\n expected = DataFrame([['a','a','foo','a'],['b','b','foo','b'],['c','c','foo','c']])\n assert_frame_equal(result, expected)\n\n df.fillna({ 2: 'foo' }, inplace=True)\n assert_frame_equal(df, expected)\n\n # limit and value\n df = DataFrame(np.random.randn(10,3))\n df.iloc[2:7,0] = np.nan\n df.iloc[3:5,2] = np.nan\n\n expected = df.copy()\n expected.iloc[2,0] = 999\n expected.iloc[3,2] = 999\n result = df.fillna(999,limit=1)\n assert_frame_equal(result, expected)\n\n # with datelike\n # GH 6344\n df = DataFrame({\n 'Date':[pd.NaT, Timestamp(\"2014-1-1\")],\n 'Date2':[ Timestamp(\"2013-1-1\"), pd.NaT]\n })\n\n expected = df.copy()\n expected['Date'] = expected['Date'].fillna(df.ix[0,'Date2'])\n result = df.fillna(value={'Date':df['Date2']})\n assert_frame_equal(result, expected)\n\n def test_fillna_dtype_conversion(self):\n # make sure that fillna on an empty frame works\n df = DataFrame(index=[\"A\",\"B\",\"C\"], columns = [1,2,3,4,5])\n result = df.get_dtype_counts().order()\n expected = Series({ 'object' : 5 })\n assert_series_equal(result, expected)\n\n result = df.fillna(1)\n expected = DataFrame(1, index=[\"A\",\"B\",\"C\"], columns = [1,2,3,4,5])\n result = result.get_dtype_counts().order()\n expected = Series({ 'int64' : 5 })\n assert_series_equal(result, expected)\n\n # empty block\n df = DataFrame(index=lrange(3),columns=['A','B'],dtype='float64')\n result = df.fillna('nan')\n expected = DataFrame('nan',index=lrange(3),columns=['A','B'])\n assert_frame_equal(result, expected)\n\n # equiv of replace\n df = DataFrame(dict(A = [1,np.nan], B = [1.,2.]))\n for v in ['',1,np.nan,1.0]:\n expected = df.replace(np.nan,v)\n result = df.fillna(v)\n assert_frame_equal(result, expected)\n\n def test_ffill(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n assert_frame_equal(self.tsframe.ffill(),\n self.tsframe.fillna(method='ffill'))\n\n def test_bfill(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n assert_frame_equal(self.tsframe.bfill(),\n self.tsframe.fillna(method='bfill'))\n\n def test_fillna_skip_certain_blocks(self):\n # don't try to fill boolean, int blocks\n\n df = DataFrame(np.random.randn(10, 4).astype(int))\n\n # it works!\n df.fillna(np.nan)\n\n def test_fillna_inplace(self):\n df = DataFrame(np.random.randn(10, 4))\n df[1][:4] = np.nan\n df[3][-4:] = np.nan\n\n expected = df.fillna(value=0)\n self.assertIsNot(expected, df)\n\n df.fillna(value=0, inplace=True)\n assert_frame_equal(df, expected)\n\n df[1][:4] = np.nan\n df[3][-4:] = np.nan\n expected = df.fillna(method='ffill')\n self.assertIsNot(expected, df)\n\n df.fillna(method='ffill', inplace=True)\n assert_frame_equal(df, expected)\n\n def test_fillna_dict_series(self):\n df = DataFrame({'a': [nan, 1, 2, nan, nan],\n 'b': [1, 2, 3, nan, nan],\n 'c': [nan, 1, 2, 3, 4]})\n\n result 
= df.fillna({'a': 0, 'b': 5})\n\n expected = df.copy()\n expected['a'] = expected['a'].fillna(0)\n expected['b'] = expected['b'].fillna(5)\n assert_frame_equal(result, expected)\n\n # it works\n result = df.fillna({'a': 0, 'b': 5, 'd': 7})\n\n # Series treated same as dict\n result = df.fillna(df.max())\n expected = df.fillna(df.max().to_dict())\n assert_frame_equal(result, expected)\n\n # disable this for now\n with assertRaisesRegexp(NotImplementedError, 'column by column'):\n df.fillna(df.max(1), axis=1)\n\n def test_fillna_columns(self):\n df = DataFrame(np.random.randn(10, 10))\n df.values[:, ::2] = np.nan\n\n result = df.fillna(method='ffill', axis=1)\n expected = df.T.fillna(method='pad').T\n assert_frame_equal(result, expected)\n\n df.insert(6, 'foo', 5)\n result = df.fillna(method='ffill', axis=1)\n expected = df.astype(float).fillna(method='ffill', axis=1)\n assert_frame_equal(result, expected)\n\n def test_fillna_invalid_method(self):\n with assertRaisesRegexp(ValueError, 'ffil'):\n self.frame.fillna(method='ffil')\n\n def test_fillna_invalid_value(self):\n # list\n self.assertRaises(TypeError, self.frame.fillna, [1, 2])\n # tuple\n self.assertRaises(TypeError, self.frame.fillna, (1, 2))\n\n def test_replace_inplace(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n tsframe = self.tsframe.copy()\n tsframe.replace(nan, 0, inplace=True)\n assert_frame_equal(tsframe, self.tsframe.fillna(0))\n\n self.assertRaises(TypeError, self.tsframe.replace, nan, inplace=True)\n self.assertRaises(TypeError, self.tsframe.replace, nan)\n\n # mixed type\n self.mixed_frame['foo'][5:20] = nan\n self.mixed_frame['A'][-10:] = nan\n\n result = self.mixed_frame.replace(np.nan, 0)\n expected = self.mixed_frame.fillna(value=0)\n assert_frame_equal(result, expected)\n\n tsframe = self.tsframe.copy()\n tsframe.replace([nan], [0], inplace=True)\n assert_frame_equal(tsframe, self.tsframe.fillna(0))\n\n def test_regex_replace_scalar(self):\n obj = {'a': list('ab..'), 'b': list('efgh')}\n dfobj = DataFrame(obj)\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ### simplest cases\n ## regex -> value\n # obj frame\n res = dfobj.replace(r'\\s*\\.\\s*', nan, regex=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.replace(r'\\s*\\.\\s*', nan, regex=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs as well\n res = dfobj.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1')\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = 
dfmix.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = dfmix.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1')\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n def test_regex_replace_scalar_inplace(self):\n obj = {'a': list('ab..'), 'b': list('efgh')}\n dfobj = DataFrame(obj)\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ### simplest cases\n ## regex -> value\n # obj frame\n res = dfobj.copy()\n res.replace(r'\\s*\\.\\s*', nan, regex=True, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(r'\\s*\\.\\s*', nan, regex=True, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True, inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(r'\\s*(\\.)\\s*', r'\\1\\1\\1', regex=True, inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs as well\n res = dfobj.copy()\n res.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(re.compile(r'\\s*\\.\\s*'), nan, regex=True, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1', regex=True,\n inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(re.compile(r'\\s*(\\.)\\s*'), r'\\1\\1\\1', regex=True,\n inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n res = dfobj.copy()\n res.replace(regex=r'\\s*\\.\\s*', value=nan, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(regex=r'\\s*\\.\\s*', value=nan, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1', inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with mixed\n res = dfmix.copy()\n res.replace(regex=r'\\s*(\\.)\\s*', value=r'\\1\\1\\1', inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n # everything with compiled regexs as well\n res = dfobj.copy()\n res.replace(regex=re.compile(r'\\s*\\.\\s*'), value=nan, inplace=True)\n assert_frame_equal(dfobj, res.fillna('.'))\n\n # mixed\n res = dfmix.copy()\n res.replace(regex=re.compile(r'\\s*\\.\\s*'), value=nan, inplace=True)\n assert_frame_equal(dfmix, res.fillna('.'))\n\n ## regex -> regex\n # obj frame\n res = dfobj.copy()\n res.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1',\n inplace=True)\n objc = obj.copy()\n objc['a'] = ['a', 'b', '...', '...']\n expec = DataFrame(objc)\n assert_frame_equal(res, expec)\n\n # with 
mixed\n res = dfmix.copy()\n res.replace(regex=re.compile(r'\\s*(\\.)\\s*'), value=r'\\1\\1\\1',\n inplace=True)\n mixc = mix.copy()\n mixc['b'] = ['a', 'b', '...', '...']\n expec = DataFrame(mixc)\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_obj(self):\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'e|f|g']\n values = [nan, 'crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +\n ['h'], 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(e|f|g)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',\n 'f_crap',\n 'g_crap', 'h'],\n 'c': ['h', 'e_crap', 'l', 'o']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.replace(value=values, regex=to_replace_res)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_obj_inplace(self):\n ### same as above with inplace=True\n ## lists of regexes and values\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'e|f|g']\n values = [nan, 'crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': ['a', 'b', nan, nan], 'b': ['crap'] * 3 +\n ['h'], 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(e|f|g)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e_crap',\n 'f_crap',\n 'g_crap', 'h'],\n 'c': ['h', 'e_crap', 'l', 'o']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'e']\n values = [r'\\1\\1', r'crap']\n res = dfobj.copy()\n res.replace(value=values, regex=to_replace_res, inplace=True)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['crap', 'f', 'g',\n 'h'],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_mixed(self):\n ## mixed frame to make sure this doesn't break things\n mix = 
{'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'a']\n values = [nan, 'crap']\n mix2 = {'a': lrange(4), 'b': list('ab..'), 'c': list('halo')}\n dfmix2 = DataFrame(mix2)\n res = dfmix2.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix2['a'], 'b': ['crap', 'b', nan, nan],\n 'c': ['h', 'crap', 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(a|b)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',\n '..']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.replace(to_replace_res, values, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.replace(regex=to_replace_res, value=values)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_list_mixed_inplace(self):\n mix = {'a': lrange(4), 'b': list('ab..')}\n dfmix = DataFrame(mix)\n # the same inplace\n ## lists of regexes and values\n # list of [re1, re2, ..., reN] -> [v1, v2, ..., vN]\n to_replace_res = [r'\\s*\\.\\s*', r'a']\n values = [nan, 'crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b', nan, nan]})\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [re1, re2, .., reN]\n to_replace_res = [r'\\s*(\\.)\\s*', r'(a|b)']\n values = [r'\\1\\1', r'\\1_crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a_crap', 'b_crap', '..',\n '..']})\n\n assert_frame_equal(res, expec)\n\n # list of [re1, re2, ..., reN] -> [(re1 or v1), (re2 or v2), ..., (reN\n # or vN)]\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.copy()\n res.replace(to_replace_res, values, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n to_replace_res = [r'\\s*(\\.)\\s*', r'a', r'(b)']\n values = [r'\\1\\1', r'crap', r'\\1_crap']\n res = dfmix.copy()\n res.replace(regex=to_replace_res, value=values, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['crap', 'b_crap', '..', '..']})\n assert_frame_equal(res, expec)\n\n def test_regex_replace_dict_mixed(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n dfmix = DataFrame(mix)\n\n ## dicts\n # single dict {re1: v1}, search the whole frame\n # need test for this...\n\n # list of dicts {re1: v1, re2: v2, ..., re3: v3}, search the whole\n # frame\n res = dfmix.replace({'b': r'\\s*\\.\\s*'}, {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace({'b': r'\\s*\\.\\s*'}, {'b': nan}, inplace=True, regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n # list of 
dicts {re1: re11, re2: re12, ..., reN: re1N}, search the\n # whole frame\n res = dfmix.replace({'b': r'\\s*(\\.)\\s*'}, {'b': r'\\1ty'}, regex=True)\n res2 = dfmix.copy()\n res2.replace({'b': r'\\s*(\\.)\\s*'}, {'b': r'\\1ty'}, inplace=True,\n regex=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n res = dfmix.replace(regex={'b': r'\\s*(\\.)\\s*'}, value={'b': r'\\1ty'})\n res2 = dfmix.copy()\n res2.replace(regex={'b': r'\\s*(\\.)\\s*'}, value={'b': r'\\1ty'},\n inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', '.ty', '.ty'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n # scalar -> dict\n # to_replace regex, {value: value}\n expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':\n mix['c']})\n res = dfmix.replace('a', {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace('a', {'b': nan}, regex=True, inplace=True)\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n res = dfmix.replace('a', {'b': nan}, regex=True)\n res2 = dfmix.copy()\n res2.replace(regex='a', value={'b': nan}, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': [nan, 'b', '.', '.'], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n\n def test_regex_replace_dict_nested(self):\n # nested dicts will not work until this is implemented for Series\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n dfmix = DataFrame(mix)\n res = dfmix.replace({'b': {r'\\s*\\.\\s*': nan}}, regex=True)\n res2 = dfmix.copy()\n res4 = dfmix.copy()\n res2.replace({'b': {r'\\s*\\.\\s*': nan}}, inplace=True, regex=True)\n res3 = dfmix.replace(regex={'b': {r'\\s*\\.\\s*': nan}})\n res4.replace(regex={'b': {r'\\s*\\.\\s*': nan}}, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n assert_frame_equal(res4, expec)\n\n def test_regex_replace_dict_nested_gh4115(self):\n df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})\n expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})\n assert_frame_equal(df.replace({'Type': {'Q':0,'T':1}}), expected)\n\n def test_regex_replace_list_to_scalar(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace([r'\\s*\\.\\s*', 'a|b'], nan, regex=True)\n res2 = df.copy()\n res3 = df.copy()\n res2.replace([r'\\s*\\.\\s*', 'a|b'], nan, regex=True, inplace=True)\n res3.replace(regex=[r'\\s*\\.\\s*', 'a|b'], value=nan, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),\n 'c': [nan, nan, nan, 'd']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_str_to_numeric(self):\n # what happens when you try to replace a numeric value with a regex?\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace(r'\\s*\\.\\s*', 0, regex=True)\n res2 = df.copy()\n res2.replace(r'\\s*\\.\\s*', 0, inplace=True, regex=True)\n res3 = df.copy()\n res3.replace(regex=r'\\s*\\.\\s*', value=0, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', 0, 0], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def 
test_regex_replace_regex_list_to_numeric(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace([r'\\s*\\.\\s*', 'b'], 0, regex=True)\n res2 = df.copy()\n res2.replace([r'\\s*\\.\\s*', 'b'], 0, regex=True, inplace=True)\n res3 = df.copy()\n res3.replace(regex=[r'\\s*\\.\\s*', 'b'], value=0, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 0, 0, 0], 'c': ['a', 0,\n nan,\n 'd']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_series_of_regexes(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n s1 = Series({'b': r'\\s*\\.\\s*'})\n s2 = Series({'b': nan})\n res = df.replace(s1, s2, regex=True)\n res2 = df.copy()\n res2.replace(s1, s2, inplace=True, regex=True)\n res3 = df.copy()\n res3.replace(regex=s1, value=s2, inplace=True)\n expec = DataFrame({'a': mix['a'], 'b': ['a', 'b', nan, nan], 'c':\n mix['c']})\n assert_frame_equal(res, expec)\n assert_frame_equal(res2, expec)\n assert_frame_equal(res3, expec)\n\n def test_regex_replace_numeric_to_object_conversion(self):\n mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}\n df = DataFrame(mix)\n res = df.replace(0, 'a')\n expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})\n assert_frame_equal(res, expec)\n self.assertEqual(res.a.dtype, np.object_)\n\n def test_replace_regex_metachar(self):\n metachars = '[]', '()', '\\d', '\\w', '\\s'\n\n for metachar in metachars:\n df = DataFrame({'a': [metachar, 'else']})\n result = df.replace({'a': {metachar: 'paren'}})\n expected = DataFrame({'a': ['paren', 'else']})\n tm.assert_frame_equal(result, expected)\n\n def test_replace(self):\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n\n zero_filled = self.tsframe.replace(nan, -1e8)\n assert_frame_equal(zero_filled, self.tsframe.fillna(-1e8))\n assert_frame_equal(zero_filled.replace(-1e8, nan), self.tsframe)\n\n self.tsframe['A'][:5] = nan\n self.tsframe['A'][-5:] = nan\n self.tsframe['B'][:5] = -1e8\n\n # empty\n df = DataFrame(index=['a', 'b'])\n assert_frame_equal(df, df.replace(5, 7))\n\n def test_replace_list(self):\n obj = {'a': list('ab..'), 'b': list('efgh'), 'c': list('helo')}\n dfobj = DataFrame(obj)\n\n ## lists of regexes and values\n # list of [v1, v2, ..., vN] -> [v1, v2, ..., vN]\n to_replace_res = [r'.', r'e']\n values = [nan, 'crap']\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame({'a': ['a', 'b', nan, nan],\n 'b': ['crap', 'f', 'g', 'h'], 'c': ['h', 'crap',\n 'l', 'o']})\n assert_frame_equal(res, expec)\n\n # list of [v1, v2, ..., vN] -> [v1, v2, .., vN]\n to_replace_res = [r'.', r'f']\n values = [r'..', r'crap']\n res = dfobj.replace(to_replace_res, values)\n expec = DataFrame({'a': ['a', 'b', '..', '..'], 'b': ['e', 'crap', 'g',\n 'h'],\n 'c': ['h', 'e', 'l', 'o']})\n\n assert_frame_equal(res, expec)\n\n def test_replace_series_dict(self):\n # from GH 3064\n df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})\n result = df.replace(0, {'zero': 0.5, 'one': 1.0})\n expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 2.0, 'b': 1.0}})\n assert_frame_equal(result, expected)\n\n result = df.replace(0, df.mean())\n assert_frame_equal(result, expected)\n\n # series to series/dict\n df = DataFrame({'zero': {'a': 0.0, 'b': 1}, 'one': {'a': 2.0, 'b': 0}})\n s = Series({'zero': 0.0, 'one': 2.0})\n result = df.replace(s, {'zero': 0.5, 'one': 1.0})\n 
expected = DataFrame({'zero': {'a': 0.5, 'b': 1}, 'one': {'a': 1.0, 'b': 0.0}})\n assert_frame_equal(result, expected)\n\n result = df.replace(s, df.mean())\n assert_frame_equal(result, expected)\n\n def test_replace_convert(self):\n # gh 3907\n df = DataFrame([['foo', 'bar', 'bah'], ['bar', 'foo', 'bah']])\n m = {'foo': 1, 'bar': 2, 'bah': 3}\n rep = df.replace(m)\n expec = Series([ np.int64] * 3)\n res = rep.dtypes\n assert_series_equal(expec, res)\n\n def test_replace_mixed(self):\n self.mixed_frame['foo'][5:20] = nan\n self.mixed_frame['A'][-10:] = nan\n\n result = self.mixed_frame.replace(np.nan, -18)\n expected = self.mixed_frame.fillna(value=-18)\n assert_frame_equal(result, expected)\n assert_frame_equal(result.replace(-18, nan), self.mixed_frame)\n\n result = self.mixed_frame.replace(np.nan, -1e8)\n expected = self.mixed_frame.fillna(value=-1e8)\n assert_frame_equal(result, expected)\n assert_frame_equal(result.replace(-1e8, nan), self.mixed_frame)\n\n # int block upcasting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64') })\n result = df.replace(0, 0.5)\n assert_frame_equal(result,expected)\n\n df.replace(0, 0.5, inplace=True)\n assert_frame_equal(df,expected)\n\n # int block splitting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64'), 'C' : Series([1,2],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0.5,1],dtype='float64'), 'C' : Series([1,2],dtype='int64') })\n result = df.replace(0, 0.5)\n assert_frame_equal(result,expected)\n\n # to object block upcasting\n df = DataFrame({ 'A' : Series([1.0,2.0],dtype='float64'), 'B' : Series([0,1],dtype='int64') })\n expected = DataFrame({ 'A' : Series([1,'foo'],dtype='object'), 'B' : Series([0,1],dtype='int64') })\n result = df.replace(2, 'foo')\n assert_frame_equal(result,expected)\n\n expected = DataFrame({ 'A' : Series(['foo','bar'],dtype='object'), 'B' : Series([0,'foo'],dtype='object') })\n result = df.replace([1,2], ['foo','bar'])\n assert_frame_equal(result,expected)\n\n # test case from\n from pandas.util.testing import makeCustomDataframe as mkdf\n df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })\n result = df.replace(3, df.mean().to_dict())\n expected = df.copy().astype('float64')\n m = df.mean()\n expected.iloc[0,0] = m[0]\n expected.iloc[1,1] = m[1]\n assert_frame_equal(result,expected)\n\n def test_replace_simple_nested_dict(self):\n df = DataFrame({'col': range(1, 5)})\n expected = DataFrame({'col': ['a', 2, 3, 'b']})\n\n result = df.replace({'col': {1: 'a', 4: 'b'}})\n tm.assert_frame_equal(expected, result)\n\n # in this case, should be the same as the not nested version\n result = df.replace({1: 'a', 4: 'b'})\n tm.assert_frame_equal(expected, result)\n\n def test_replace_simple_nested_dict_with_nonexistent_value(self):\n df = DataFrame({'col': range(1, 5)})\n expected = DataFrame({'col': ['a', 2, 3, 'b']})\n\n result = df.replace({-1: '-', 1: 'a', 4: 'b'})\n tm.assert_frame_equal(expected, result)\n\n result = df.replace({'col': {-1: '-', 1: 'a', 4: 'b'}})\n tm.assert_frame_equal(expected, result)\n\n def test_interpolate(self):\n pass\n\n def test_replace_value_is_none(self):\n self.assertRaises(TypeError, self.tsframe.replace, nan)\n orig_value = self.tsframe.iloc[0, 0]\n orig2 = self.tsframe.iloc[1, 0]\n\n self.tsframe.iloc[0, 0] = nan\n 
self.tsframe.iloc[1, 0] = 1\n\n result = self.tsframe.replace(to_replace={nan: 0})\n expected = self.tsframe.T.replace(to_replace={nan: 0}).T\n assert_frame_equal(result, expected)\n\n result = self.tsframe.replace(to_replace={nan: 0, 1: -1e8})\n tsframe = self.tsframe.copy()\n tsframe.iloc[0, 0] = 0\n tsframe.iloc[1, 0] = -1e8\n expected = tsframe\n assert_frame_equal(expected, result)\n self.tsframe.iloc[0, 0] = orig_value\n self.tsframe.iloc[1, 0] = orig2\n\n def test_replace_for_new_dtypes(self):\n\n # dtypes\n tsframe = self.tsframe.copy().astype(np.float32)\n tsframe['A'][:5] = nan\n tsframe['A'][-5:] = nan\n\n zero_filled = tsframe.replace(nan, -1e8)\n assert_frame_equal(zero_filled, tsframe.fillna(-1e8))\n assert_frame_equal(zero_filled.replace(-1e8, nan), tsframe)\n\n tsframe['A'][:5] = nan\n tsframe['A'][-5:] = nan\n tsframe['B'][:5] = -1e8\n\n b = tsframe['B']\n b[b == -1e8] = nan\n tsframe['B'] = b\n result = tsframe.fillna(method='bfill')\n assert_frame_equal(result, tsframe.fillna(method='bfill'))\n\n def test_replace_dtypes(self):\n # int\n df = DataFrame({'ints': [1, 2, 3]})\n result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]})\n assert_frame_equal(result, expected)\n\n df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int32)\n result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int32)\n assert_frame_equal(result, expected)\n\n df = DataFrame({'ints': [1, 2, 3]}, dtype=np.int16)\n result = df.replace(1, 0)\n expected = DataFrame({'ints': [0, 2, 3]}, dtype=np.int16)\n assert_frame_equal(result, expected)\n\n # bools\n df = DataFrame({'bools': [True, False, True]})\n result = df.replace(False, True)\n self.assertTrue(result.values.all())\n\n # complex blocks\n df = DataFrame({'complex': [1j, 2j, 3j]})\n result = df.replace(1j, 0j)\n expected = DataFrame({'complex': [0j, 2j, 3j]})\n assert_frame_equal(result, expected)\n\n # datetime blocks\n prev = datetime.today()\n now = datetime.today()\n df = DataFrame({'datetime64': Index([prev, now, prev])})\n result = df.replace(prev, now)\n expected = DataFrame({'datetime64': Index([now] * 3)})\n assert_frame_equal(result, expected)\n\n def test_replace_input_formats(self):\n # both dicts\n to_rep = {'A': np.nan, 'B': 0, 'C': ''}\n values = {'A': 0, 'B': -1, 'C': 'missing'}\n df = DataFrame({'A': [np.nan, 0, np.inf], 'B': [0, 2, 5],\n 'C': ['', 'asdf', 'fd']})\n filled = df.replace(to_rep, values)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(to_rep[k], values[k])\n assert_frame_equal(filled, DataFrame(expected))\n\n result = df.replace([0, 2, 5], [5, 2, 0])\n expected = DataFrame({'A': [np.nan, 5, np.inf], 'B': [5, 2, 0],\n 'C': ['', 'asdf', 'fd']})\n assert_frame_equal(result, expected)\n\n # dict to scalar\n filled = df.replace(to_rep, 0)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(to_rep[k], 0)\n assert_frame_equal(filled, DataFrame(expected))\n\n self.assertRaises(TypeError, df.replace, to_rep, [np.nan, 0, ''])\n\n # scalar to dict\n values = {'A': 0, 'B': -1, 'C': 'missing'}\n df = DataFrame({'A': [np.nan, 0, np.nan], 'B': [0, 2, 5],\n 'C': ['', 'asdf', 'fd']})\n filled = df.replace(np.nan, values)\n expected = {}\n for k, v in compat.iteritems(df):\n expected[k] = v.replace(np.nan, values[k])\n assert_frame_equal(filled, DataFrame(expected))\n\n # list to list\n to_rep = [np.nan, 0, '']\n values = [-2, -1, 'missing']\n result = df.replace(to_rep, values)\n expected = df.copy()\n for i in range(len(to_rep)):\n 
expected.replace(to_rep[i], values[i], inplace=True)\n assert_frame_equal(result, expected)\n\n self.assertRaises(ValueError, df.replace, to_rep, values[1:])\n\n # list to scalar\n to_rep = [np.nan, 0, '']\n result = df.replace(to_rep, -1)\n expected = df.copy()\n for i in range(len(to_rep)):\n expected.replace(to_rep[i], -1, inplace=True)\n assert_frame_equal(result, expected)\n\n def test_replace_limit(self):\n pass\n\n def test_replace_dict_no_regex(self):\n answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:\n 'Disagree', 4: 'Strongly Disagree'})\n weights = {'Agree': 4, 'Disagree': 2, 'Neutral': 3, 'Strongly Agree':\n 5, 'Strongly Disagree': 1}\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_series_no_regex(self):\n answer = Series({0: 'Strongly Agree', 1: 'Agree', 2: 'Neutral', 3:\n 'Disagree', 4: 'Strongly Disagree'})\n weights = Series({'Agree': 4, 'Disagree': 2, 'Neutral': 3,\n 'Strongly Agree': 5, 'Strongly Disagree': 1})\n expected = Series({0: 5, 1: 4, 2: 3, 3: 2, 4: 1})\n result = answer.replace(weights)\n tm.assert_series_equal(result, expected)\n\n def test_replace_dict_tuple_list_ordering_remains_the_same(self):\n df = DataFrame(dict(A=[nan, 1]))\n res1 = df.replace(to_replace={nan: 0, 1: -1e8})\n res2 = df.replace(to_replace=(1, nan), value=[-1e8, 0])\n res3 = df.replace(to_replace=[1, nan], value=[-1e8, 0])\n\n expected = DataFrame({'A': [0, -1e8]})\n tm.assert_frame_equal(res1, res2)\n tm.assert_frame_equal(res2, res3)\n tm.assert_frame_equal(res3, expected)\n\n def test_replace_doesnt_replace_without_regex(self):\n from pandas.compat import StringIO\n raw = \"\"\"fol T_opp T_Dir T_Enh\n 0 1 0 0 vo\n 1 2 vr 0 0\n 2 2 0 0 0\n 3 3 0 bt 0\"\"\"\n df = read_csv(StringIO(raw), sep=r'\\s+')\n res = df.replace({'\\D': 1})\n tm.assert_frame_equal(df, res)\n\n def test_replace_bool_with_string(self):\n df = DataFrame({'a': [True, False], 'b': list('ab')})\n result = df.replace(True, 'a')\n expected = DataFrame({'a': ['a', False], 'b': df.b})\n tm.assert_frame_equal(result, expected)\n\n def test_replace_pure_bool_with_string_no_op(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace('asdf', 'fdsa')\n tm.assert_frame_equal(df, result)\n\n def test_replace_bool_with_bool(self):\n df = DataFrame(np.random.rand(2, 2) > 0.5)\n result = df.replace(False, True)\n expected = DataFrame(np.ones((2, 2), dtype=bool))\n tm.assert_frame_equal(result, expected)\n\n def test_replace_with_dict_with_bool_keys(self):\n df = DataFrame({0: [True, False], 1: [False, True]})\n with tm.assertRaisesRegexp(TypeError, 'Cannot compare types .+'):\n df.replace({'asdf': 'asdb', True: 'yes'})\n\n def test_replace_truthy(self):\n df = DataFrame({'a': [True, True]})\n r = df.replace([np.inf, -np.inf], np.nan)\n e = df\n tm.assert_frame_equal(r, e)\n\n def test_replace_int_to_int_chain(self):\n df = DataFrame({'a': lrange(1, 5)})\n with tm.assertRaisesRegexp(ValueError, \"Replacement not allowed .+\"):\n df.replace({'a': dict(zip(range(1, 5), range(2, 6)))})\n\n def test_replace_str_to_str_chain(self):\n a = np.arange(1, 5)\n astr = a.astype(str)\n bstr = np.arange(2, 6).astype(str)\n df = DataFrame({'a': astr})\n with tm.assertRaisesRegexp(ValueError, \"Replacement not allowed .+\"):\n df.replace({'a': dict(zip(astr, bstr))})\n\n def test_replace_swapping_bug(self):\n df = pd.DataFrame({'a': [True, False, True]})\n res = df.replace({'a': {True: 'Y', False: 'N'}})\n expect = 
pd.DataFrame({'a': ['Y', 'N', 'Y']})\n tm.assert_frame_equal(res, expect)\n\n df = pd.DataFrame({'a': [0, 1, 0]})\n res = df.replace({'a': {0: 'Y', 1: 'N'}})\n expect = pd.DataFrame({'a': ['Y', 'N', 'Y']})\n tm.assert_frame_equal(res, expect)\n\n def test_replace_period(self):\n d = {'fname':\n {'out_augmented_AUG_2011.json': pd.Period(year=2011, month=8, freq='M'),\n 'out_augmented_JAN_2011.json': pd.Period(year=2011, month=1, freq='M'),\n 'out_augmented_MAY_2012.json': pd.Period(year=2012, month=5, freq='M'),\n 'out_augmented_SUBSIDY_WEEK.json': pd.Period(year=2011, month=4, freq='M'),\n 'out_augmented_AUG_2012.json': pd.Period(year=2012, month=8, freq='M'),\n 'out_augmented_MAY_2011.json': pd.Period(year=2011, month=5, freq='M'),\n 'out_augmented_SEP_2013.json': pd.Period(year=2013, month=9, freq='M')}}\n\n df = pd.DataFrame(['out_augmented_AUG_2012.json',\n 'out_augmented_SEP_2013.json',\n 'out_augmented_SUBSIDY_WEEK.json',\n 'out_augmented_MAY_2012.json',\n 'out_augmented_MAY_2011.json',\n 'out_augmented_AUG_2011.json',\n 'out_augmented_JAN_2011.json'], columns=['fname'])\n tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))\n expected = DataFrame({'fname': [d['fname'][k]\n for k in df.fname.values]})\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_replace_datetime(self):\n d = {'fname':\n {'out_augmented_AUG_2011.json': pd.Timestamp('2011/08'),\n 'out_augmented_JAN_2011.json': pd.Timestamp('2011/01'),\n 'out_augmented_MAY_2012.json': pd.Timestamp('2012/05'),\n 'out_augmented_SUBSIDY_WEEK.json': pd.Timestamp('2011/04'),\n 'out_augmented_AUG_2012.json': pd.Timestamp('2012/08'),\n 'out_augmented_MAY_2011.json': pd.Timestamp('2011/05'),\n 'out_augmented_SEP_2013.json': pd.Timestamp('2013/09')}}\n\n df = pd.DataFrame(['out_augmented_AUG_2012.json',\n 'out_augmented_SEP_2013.json',\n 'out_augmented_SUBSIDY_WEEK.json',\n 'out_augmented_MAY_2012.json',\n 'out_augmented_MAY_2011.json',\n 'out_augmented_AUG_2011.json',\n 'out_augmented_JAN_2011.json'], columns=['fname'])\n tm.assert_equal(set(df.fname.values), set(d['fname'].keys()))\n expected = DataFrame({'fname': [d['fname'][k]\n for k in df.fname.values]})\n result = df.replace(d)\n tm.assert_frame_equal(result, expected)\n\n def test_combine_multiple_frames_dtypes(self):\n\n # GH 2759\n A = DataFrame(data=np.ones((10, 2)), columns=['foo', 'bar'], dtype=np.float64)\n B = DataFrame(data=np.ones((10, 2)), dtype=np.float32)\n results = pd.concat((A, B), axis=1).get_dtype_counts()\n expected = Series(dict( float64 = 2, float32 = 2 ))\n assert_series_equal(results,expected)\n\n def test_ops(self):\n\n # tst ops and reversed ops in evaluation\n # GH7198\n\n # smaller hits python, larger hits numexpr\n for n in [ 4, 4000 ]:\n\n df = DataFrame(1,index=range(n),columns=list('abcd'))\n df.iloc[0] = 2\n m = df.mean()\n\n for op_str, op, rop in [('+','__add__','__radd__'),\n ('-','__sub__','__rsub__'),\n ('*','__mul__','__rmul__'),\n ('/','__truediv__','__rtruediv__')]:\n\n base = DataFrame(np.tile(m.values,n).reshape(n,-1),columns=list('abcd'))\n expected = eval(\"base{op}df\".format(op=op_str))\n\n # ops as strings\n result = eval(\"m{op}df\".format(op=op_str))\n assert_frame_equal(result,expected)\n\n # these are commutative\n if op in ['+','*']:\n result = getattr(df,op)(m)\n assert_frame_equal(result,expected)\n\n # these are not\n elif op in ['-','/']:\n result = getattr(df,rop)(m)\n assert_frame_equal(result,expected)\n\n # GH7192\n df = DataFrame(dict(A=np.random.randn(25000)))\n df.iloc[0:5] = 
np.nan\n expected = (1-np.isnan(df.iloc[0:25]))\n result = (1-np.isnan(df)).iloc[0:25]\n assert_frame_equal(result,expected)\n\n def test_truncate(self):\n offset = datetools.bday\n\n ts = self.tsframe[::3]\n\n start, end = self.tsframe.index[3], self.tsframe.index[6]\n\n start_missing = self.tsframe.index[2]\n end_missing = self.tsframe.index[7]\n\n # neither specified\n truncated = ts.truncate()\n assert_frame_equal(truncated, ts)\n\n # both specified\n expected = ts[1:3]\n\n truncated = ts.truncate(start, end)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(start_missing, end_missing)\n assert_frame_equal(truncated, expected)\n\n # start specified\n expected = ts[1:]\n\n truncated = ts.truncate(before=start)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(before=start_missing)\n assert_frame_equal(truncated, expected)\n\n # end specified\n expected = ts[:3]\n\n truncated = ts.truncate(after=end)\n assert_frame_equal(truncated, expected)\n\n truncated = ts.truncate(after=end_missing)\n assert_frame_equal(truncated, expected)\n\n self.assertRaises(ValueError, ts.truncate,\n before=ts.index[-1] - 1,\n after=ts.index[0] +1)\n\n def test_truncate_copy(self):\n index = self.tsframe.index\n truncated = self.tsframe.truncate(index[5], index[10])\n truncated.values[:] = 5.\n self.assertFalse((self.tsframe.values[5:11] == 5).any())\n\n def test_xs(self):\n idx = self.frame.index[5]\n xs = self.frame.xs(idx)\n for item, value in compat.iteritems(xs):\n if np.isnan(value):\n self.assertTrue(np.isnan(self.frame[item][idx]))\n else:\n self.assertEqual(value, self.frame[item][idx])\n\n # mixed-type xs\n test_data = {\n 'A': {'1': 1, '2': 2},\n 'B': {'1': '1', '2': '2', '3': '3'},\n }\n frame = DataFrame(test_data)\n xs = frame.xs('1')\n self.assertEqual(xs.dtype, np.object_)\n self.assertEqual(xs['A'], 1)\n self.assertEqual(xs['B'], '1')\n\n with tm.assertRaises(KeyError):\n self.tsframe.xs(self.tsframe.index[0] - datetools.bday)\n\n # xs get column\n series = self.frame.xs('A', axis=1)\n expected = self.frame['A']\n assert_series_equal(series, expected)\n\n # view is returned if possible\n series = self.frame.xs('A', axis=1)\n series[:] = 5\n self.assertTrue((expected == 5).all())\n\n def test_xs_corner(self):\n # pathological mixed-type reordering case\n df = DataFrame(index=[0])\n df['A'] = 1.\n df['B'] = 'foo'\n df['C'] = 2.\n df['D'] = 'bar'\n df['E'] = 3.\n\n xs = df.xs(0)\n assert_almost_equal(xs, [1., 'foo', 2., 'bar', 3.])\n\n # no columns but index\n df = DataFrame(index=['a', 'b', 'c'])\n result = df.xs('a')\n expected = Series([])\n assert_series_equal(result, expected)\n\n def test_xs_duplicates(self):\n df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])\n\n cross = df.xs('c')\n exp = df.irow(2)\n assert_series_equal(cross, exp)\n\n def test_xs_keep_level(self):\n df = DataFrame({'day': {0: 'sat', 1: 'sun'},\n 'flavour': {0: 'strawberry', 1: 'strawberry'},\n 'sales': {0: 10, 1: 12},\n 'year': {0: 2008, 1: 2008}}).set_index(['year','flavour','day'])\n result = df.xs('sat', level='day', drop_level=False)\n expected = df[:1]\n assert_frame_equal(result, expected)\n\n result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)\n assert_frame_equal(result, expected)\n\n def test_pivot(self):\n data = {\n 'index': ['A', 'B', 'C', 'C', 'B', 'A'],\n 'columns': ['One', 'One', 'One', 'Two', 'Two', 'Two'],\n 'values': [1., 2., 3., 3., 2., 1.]\n }\n\n frame = DataFrame(data)\n pivoted = frame.pivot(\n index='index', columns='columns', 
values='values')\n\n expected = DataFrame({\n 'One': {'A': 1., 'B': 2., 'C': 3.},\n 'Two': {'A': 1., 'B': 2., 'C': 3.}\n })\n expected.index.name, expected.columns.name = 'index', 'columns'\n\n assert_frame_equal(pivoted, expected)\n\n # name tracking\n self.assertEqual(pivoted.index.name, 'index')\n self.assertEqual(pivoted.columns.name, 'columns')\n\n # don't specify values\n pivoted = frame.pivot(index='index', columns='columns')\n self.assertEqual(pivoted.index.name, 'index')\n self.assertEqual(pivoted.columns.names, (None, 'columns'))\n\n # pivot multiple columns\n wp = tm.makePanel()\n lp = wp.to_frame()\n df = lp.reset_index()\n assert_frame_equal(df.pivot('major', 'minor'), lp.unstack())\n\n def test_pivot_duplicates(self):\n data = DataFrame({'a': ['bar', 'bar', 'foo', 'foo', 'foo'],\n 'b': ['one', 'two', 'one', 'one', 'two'],\n 'c': [1., 2., 3., 3., 4.]})\n with assertRaisesRegexp(ValueError, 'duplicate entries'):\n data.pivot('a', 'b', 'c')\n\n def test_pivot_empty(self):\n df = DataFrame({}, columns=['a', 'b', 'c'])\n result = df.pivot('a', 'b', 'c')\n expected = DataFrame({})\n assert_frame_equal(result, expected, check_names=False)\n\n def test_pivot_integer_bug(self):\n df = DataFrame(data=[(\"A\", \"1\", \"A1\"), (\"B\", \"2\", \"B2\")])\n\n result = df.pivot(index=1, columns=0, values=2)\n repr(result)\n self.assert_numpy_array_equal(result.columns, ['A', 'B'])\n\n def test_reindex(self):\n newFrame = self.frame.reindex(self.ts1.index)\n\n for col in newFrame.columns:\n for idx, val in compat.iteritems(newFrame[col]):\n if idx in self.frame.index:\n if np.isnan(val):\n self.assertTrue(np.isnan(self.frame[col][idx]))\n else:\n self.assertEqual(val, self.frame[col][idx])\n else:\n self.assertTrue(np.isnan(val))\n\n for col, series in compat.iteritems(newFrame):\n self.assertTrue(tm.equalContents(series.index, newFrame.index))\n emptyFrame = self.frame.reindex(Index([]))\n self.assertEqual(len(emptyFrame.index), 0)\n\n # Cython code should be unit-tested directly\n nonContigFrame = self.frame.reindex(self.ts1.index[::2])\n\n for col in nonContigFrame.columns:\n for idx, val in compat.iteritems(nonContigFrame[col]):\n if idx in self.frame.index:\n if np.isnan(val):\n self.assertTrue(np.isnan(self.frame[col][idx]))\n else:\n self.assertEqual(val, self.frame[col][idx])\n else:\n self.assertTrue(np.isnan(val))\n\n for col, series in compat.iteritems(nonContigFrame):\n self.assertTrue(tm.equalContents(series.index,\n nonContigFrame.index))\n\n # corner cases\n\n # Same index, copies values but not index if copy=False\n newFrame = self.frame.reindex(self.frame.index, copy=False)\n self.assertIs(newFrame.index, self.frame.index)\n\n # length zero\n newFrame = self.frame.reindex([])\n self.assertTrue(newFrame.empty)\n self.assertEqual(len(newFrame.columns), len(self.frame.columns))\n\n # length zero with columns reindexed with non-empty index\n newFrame = self.frame.reindex([])\n newFrame = newFrame.reindex(self.frame.index)\n self.assertEqual(len(newFrame.index), len(self.frame.index))\n self.assertEqual(len(newFrame.columns), len(self.frame.columns))\n\n # pass non-Index\n newFrame = self.frame.reindex(list(self.ts1.index))\n self.assertTrue(newFrame.index.equals(self.ts1.index))\n\n # copy with no axes\n result = self.frame.reindex()\n assert_frame_equal(result,self.frame)\n self.assertFalse(result is self.frame)\n\n def test_reindex_name_remains(self):\n s = Series(random.rand(10))\n df = DataFrame(s, index=np.arange(len(s)))\n i = Series(np.arange(10), name='iname')\n\n df = 
df.reindex(i)\n self.assertEqual(df.index.name, 'iname')\n\n df = df.reindex(Index(np.arange(10), name='tmpname'))\n self.assertEqual(df.index.name, 'tmpname')\n\n s = Series(random.rand(10))\n df = DataFrame(s.T, index=np.arange(len(s)))\n i = Series(np.arange(10), name='iname')\n df = df.reindex(columns=i)\n self.assertEqual(df.columns.name, 'iname')\n\n def test_reindex_int(self):\n smaller = self.intframe.reindex(self.intframe.index[::2])\n\n self.assertEqual(smaller['A'].dtype, np.int64)\n\n bigger = smaller.reindex(self.intframe.index)\n self.assertEqual(bigger['A'].dtype, np.float64)\n\n smaller = self.intframe.reindex(columns=['A', 'B'])\n self.assertEqual(smaller['A'].dtype, np.int64)\n\n def test_reindex_like(self):\n other = self.frame.reindex(index=self.frame.index[:10],\n columns=['C', 'B'])\n\n assert_frame_equal(other, self.frame.reindex_like(other))\n\n def test_reindex_columns(self):\n newFrame = self.frame.reindex(columns=['A', 'B', 'E'])\n\n assert_series_equal(newFrame['B'], self.frame['B'])\n self.assertTrue(np.isnan(newFrame['E']).all())\n self.assertNotIn('C', newFrame)\n\n # length zero\n newFrame = self.frame.reindex(columns=[])\n self.assertTrue(newFrame.empty)\n\n def test_reindex_axes(self):\n\n # GH 3317, reindexing by both axes loses freq of the index\n from datetime import datetime\n df = DataFrame(np.ones((3, 3)), index=[datetime(2012, 1, 1), datetime(2012, 1, 2), datetime(2012, 1, 3)], columns=['a', 'b', 'c'])\n time_freq = date_range('2012-01-01', '2012-01-03', freq='d')\n some_cols = ['a', 'b']\n\n index_freq = df.reindex(index=time_freq).index.freq\n both_freq = df.reindex(index=time_freq, columns=some_cols).index.freq\n seq_freq = df.reindex(index=time_freq).reindex(columns=some_cols).index.freq\n self.assertEqual(index_freq, both_freq)\n self.assertEqual(index_freq, seq_freq)\n\n def test_reindex_fill_value(self):\n df = DataFrame(np.random.randn(10, 4))\n\n # axis=0\n result = df.reindex(lrange(15))\n self.assertTrue(np.isnan(result.values[-5:]).all())\n\n result = df.reindex(lrange(15), fill_value=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n # axis=1\n result = df.reindex(columns=lrange(5), fill_value=0.)\n expected = df.copy()\n expected[4] = 0.\n assert_frame_equal(result, expected)\n\n result = df.reindex(columns=lrange(5), fill_value=0)\n expected = df.copy()\n expected[4] = 0\n assert_frame_equal(result, expected)\n\n result = df.reindex(columns=lrange(5), fill_value='foo')\n expected = df.copy()\n expected[4] = 'foo'\n assert_frame_equal(result, expected)\n\n # reindex_axis\n result = df.reindex_axis(lrange(15), fill_value=0., axis=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n result = df.reindex_axis(lrange(5), fill_value=0., axis=1)\n expected = df.reindex(columns=lrange(5)).fillna(0)\n assert_frame_equal(result, expected)\n\n # other dtypes\n df['foo'] = 'foo'\n result = df.reindex(lrange(15), fill_value=0)\n expected = df.reindex(lrange(15)).fillna(0)\n assert_frame_equal(result, expected)\n\n def test_reindex_dups(self):\n\n # GH4746, reindex on duplicate index error messages\n arr = np.random.randn(10)\n df = DataFrame(arr,index=[1,2,3,4,5,1,2,3,4,5])\n\n # set index is ok\n result = df.copy()\n result.index = list(range(len(df)))\n expected = DataFrame(arr,index=list(range(len(df))))\n assert_frame_equal(result,expected)\n\n # reindex fails\n self.assertRaises(ValueError, df.reindex, index=list(range(len(df))))\n\n def test_align(self):\n af, bf 
= self.frame.align(self.frame)\n self.assertIsNot(af._data, self.frame._data)\n\n af, bf = self.frame.align(self.frame, copy=False)\n self.assertIs(af._data, self.frame._data)\n\n # axis = 0\n other = self.frame.ix[:-5, :3]\n af, bf = self.frame.align(other, axis=0, fill_value=-1)\n self.assertTrue(bf.columns.equals(other.columns))\n # test fill value\n join_idx = self.frame.index.join(other.index)\n diff_a = self.frame.index.diff(join_idx)\n diff_b = other.index.diff(join_idx)\n diff_a_vals = af.reindex(diff_a).values\n diff_b_vals = bf.reindex(diff_b).values\n self.assertTrue((diff_a_vals == -1).all())\n\n af, bf = self.frame.align(other, join='right', axis=0)\n self.assertTrue(bf.columns.equals(other.columns))\n self.assertTrue(bf.index.equals(other.index))\n self.assertTrue(af.index.equals(other.index))\n\n # axis = 1\n other = self.frame.ix[:-5, :3].copy()\n af, bf = self.frame.align(other, axis=1)\n self.assertTrue(bf.columns.equals(self.frame.columns))\n self.assertTrue(bf.index.equals(other.index))\n\n # test fill value\n join_idx = self.frame.index.join(other.index)\n diff_a = self.frame.index.diff(join_idx)\n diff_b = other.index.diff(join_idx)\n diff_a_vals = af.reindex(diff_a).values\n diff_b_vals = bf.reindex(diff_b).values\n self.assertTrue((diff_a_vals == -1).all())\n\n af, bf = self.frame.align(other, join='inner', axis=1)\n self.assertTrue(bf.columns.equals(other.columns))\n\n af, bf = self.frame.align(other, join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(other.columns))\n\n # test other non-float types\n af, bf = self.intframe.align(other, join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(other.columns))\n\n af, bf = self.mixed_frame.align(self.mixed_frame,\n join='inner', axis=1, method='pad')\n self.assertTrue(bf.columns.equals(self.mixed_frame.columns))\n\n af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=None)\n self.assertTrue(bf.index.equals(Index([])))\n\n af, bf = self.frame.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n # mixed floats/ints\n af, bf = self.mixed_float.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n af, bf = self.mixed_int.align(other.ix[:, 0], join='inner', axis=1,\n method=None, fill_value=0)\n self.assertTrue(bf.index.equals(Index([])))\n\n # try to align dataframe to series along bad axis\n self.assertRaises(ValueError, self.frame.align, af.ix[0, :3],\n join='inner', axis=2)\n\n def _check_align(self, a, b, axis, fill_axis, how, method, limit=None):\n aa, ab = a.align(b, axis=axis, join=how, method=method, limit=limit,\n fill_axis=fill_axis)\n\n join_index, join_columns = None, None\n\n ea, eb = a, b\n if axis is None or axis == 0:\n join_index = a.index.join(b.index, how=how)\n ea = ea.reindex(index=join_index)\n eb = eb.reindex(index=join_index)\n\n if axis is None or axis == 1:\n join_columns = a.columns.join(b.columns, how=how)\n ea = ea.reindex(columns=join_columns)\n eb = eb.reindex(columns=join_columns)\n\n ea = ea.fillna(axis=fill_axis, method=method, limit=limit)\n eb = eb.fillna(axis=fill_axis, method=method, limit=limit)\n\n assert_frame_equal(aa, ea)\n assert_frame_equal(ab, eb)\n\n def test_align_fill_method_inner(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('inner', meth, ax, fax)\n\n def test_align_fill_method_outer(self):\n for 
meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('outer', meth, ax, fax)\n\n def test_align_fill_method_left(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('left', meth, ax, fax)\n\n def test_align_fill_method_right(self):\n for meth in ['pad', 'bfill']:\n for ax in [0, 1, None]:\n for fax in [0, 1]:\n self._check_align_fill('right', meth, ax, fax)\n\n def _check_align_fill(self, kind, meth, ax, fax):\n left = self.frame.ix[0:4, :10]\n right = self.frame.ix[2:, 6:]\n empty = self.frame.ix[:0, :0]\n\n self._check_align(left, right, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(left, right, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # empty left\n self._check_align(empty, right, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(empty, right, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # empty right\n self._check_align(left, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(left, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n # both empty\n self._check_align(empty, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth)\n self._check_align(empty, empty, axis=ax, fill_axis=fax,\n how=kind, method=meth, limit=1)\n\n def test_align_int_fill_bug(self):\n # GH #910\n X = np.arange(10*10, dtype='float64').reshape(10, 10)\n Y = np.ones((10, 1), dtype=int)\n\n df1 = DataFrame(X)\n df1['0.X'] = Y.squeeze()\n\n df2 = df1.astype(float)\n\n result = df1 - df1.mean()\n expected = df2 - df2.mean()\n assert_frame_equal(result, expected)\n\n def test_where(self):\n default_frame = DataFrame(np.random.randn(5, 3),columns=['A','B','C'])\n\n def _safe_add(df):\n # only add to the numeric items\n def is_ok(s):\n return issubclass(s.dtype.type, (np.integer,np.floating)) and s.dtype != 'uint8'\n return DataFrame(dict([ (c,s+1) if is_ok(s) else (c,s) for c, s in compat.iteritems(df) ]))\n\n def _check_get(df, cond, check_dtypes = True):\n other1 = _safe_add(df)\n rs = df.where(cond, other1)\n rs2 = df.where(cond.values, other1)\n for k, v in rs.iteritems():\n assert_series_equal(v, Series(np.where(cond[k], df[k], other1[k]),index=v.index))\n assert_frame_equal(rs, rs2)\n\n # dtypes\n if check_dtypes:\n self.assertTrue((rs.dtypes == df.dtypes).all() == True)\n\n # check getting\n for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n cond = df > 0\n _check_get(df, cond)\n\n\n # upcasting case (GH # 2794)\n df = DataFrame(dict([ (c,Series([1]*3,dtype=c)) for c in ['int64','int32','float32','float64'] ]))\n df.ix[1,:] = 0\n result = df.where(df>=0).get_dtype_counts()\n\n #### when we don't preserve boolean casts ####\n #expected = Series({ 'float32' : 1, 'float64' : 3 })\n\n expected = Series({ 'float32' : 1, 'float64' : 1, 'int32' : 1, 'int64' : 1 })\n assert_series_equal(result, expected)\n\n # aligning\n def _check_align(df, cond, other, check_dtypes = True):\n rs = df.where(cond, other)\n for i, k in enumerate(rs.columns):\n result = rs[k]\n d = df[k].values\n c = cond[k].reindex(df[k].index).fillna(False).values\n\n if np.isscalar(other):\n o = other\n else:\n if isinstance(other,np.ndarray):\n o = Series(other[:,i],index=result.index).values\n else:\n o = other[k].values\n\n new_values = d if c.all() else np.where(c, d, o)\n expected = Series(new_values,index=result.index)\n\n # since we can't always have the correct numpy dtype\n # as numpy doesn't 
know how to downcast, don't check\n assert_series_equal(result, expected, check_dtype=False)\n\n # dtypes\n # can't check dtype when other is an ndarray\n\n if check_dtypes and not isinstance(other,np.ndarray):\n self.assertTrue((rs.dtypes == df.dtypes).all() == True)\n\n for df in [ self.mixed_frame, self.mixed_float, self.mixed_int ]:\n\n # other is a frame\n cond = (df > 0)[1:]\n _check_align(df, cond, _safe_add(df))\n\n # check other is ndarray\n cond = df > 0\n _check_align(df, cond, (_safe_add(df).values))\n\n # integers are upcast, so don't check the dtypes\n cond = df > 0\n check_dtypes = all([ not issubclass(s.type,np.integer) for s in df.dtypes ])\n _check_align(df, cond, np.nan, check_dtypes = check_dtypes)\n\n # invalid conditions\n df = default_frame\n err1 = (df + 1).values[0:2, :]\n self.assertRaises(ValueError, df.where, cond, err1)\n\n err2 = cond.ix[:2, :].values\n other1 = _safe_add(df)\n self.assertRaises(ValueError, df.where, err2, other1)\n\n self.assertRaises(ValueError, df.mask, True)\n self.assertRaises(ValueError, df.mask, 0)\n\n # where inplace\n def _check_set(df, cond, check_dtypes = True):\n dfi = df.copy()\n econd = cond.reindex_like(df).fillna(True)\n expected = dfi.mask(~econd)\n\n dfi.where(cond, np.nan, inplace=True)\n assert_frame_equal(dfi, expected)\n\n # dtypes (and confirm upcasts)x\n if check_dtypes:\n for k, v in compat.iteritems(df.dtypes):\n if issubclass(v.type,np.integer) and not cond[k].all():\n v = np.dtype('float64')\n self.assertEqual(dfi[k].dtype, v)\n\n for df in [ default_frame, self.mixed_frame, self.mixed_float, self.mixed_int ]:\n\n cond = df > 0\n _check_set(df, cond)\n\n cond = df >= 0\n _check_set(df, cond)\n\n # aligining\n cond = (df >= 0)[1:]\n _check_set(df, cond)\n\n def test_where_bug(self):\n\n # GH 2793\n\n df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [4.0, 3.0, 2.0, 1.0]}, dtype = 'float64')\n expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')\n result = df.where(df > 2, np.nan)\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(result > 2, np.nan, inplace=True)\n assert_frame_equal(result, expected)\n\n # mixed\n for dtype in ['int16','int8','int32','int64']:\n df = DataFrame({'a': np.array([1, 2, 3, 4],dtype=dtype), 'b': np.array([4.0, 3.0, 2.0, 1.0], dtype = 'float64') })\n expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [4.0, 3.0, np.nan, np.nan]}, dtype = 'float64')\n result = df.where(df > 2, np.nan)\n assert_frame_equal(result, expected)\n\n result = df.copy()\n result.where(result > 2, np.nan, inplace=True)\n assert_frame_equal(result, expected)\n\n # transpositional issue\n # GH7506\n a = DataFrame({ 0 : [1,2], 1 : [3,4], 2 : [5,6]})\n b = DataFrame({ 0 : [np.nan,8], 1:[9,np.nan], 2:[np.nan,np.nan]})\n do_not_replace = b.isnull() | (a > b)\n\n expected = a.copy()\n expected[~do_not_replace] = b\n\n result = a.where(do_not_replace,b)\n assert_frame_equal(result,expected)\n\n a = DataFrame({ 0 : [4,6], 1 : [1,0]})\n b = DataFrame({ 0 : [np.nan,3],1:[3,np.nan]})\n do_not_replace = b.isnull() | (a > b)\n\n expected = a.copy()\n expected[~do_not_replace] = b\n\n result = a.where(do_not_replace,b)\n assert_frame_equal(result,expected)\n\n def test_where_datetime(self):\n\n # GH 3311\n df = DataFrame(dict(A = date_range('20130102',periods=5),\n B = date_range('20130104',periods=5),\n C = np.random.randn(5)))\n\n stamp = datetime(2013,1,3)\n result = df[df>stamp]\n expected = df.copy()\n expected.loc[[0,1],'A'] = np.nan\n 
assert_frame_equal(result,expected)\n\n def test_where_none(self):\n # GH 4667\n # setting with None changes dtype\n df = DataFrame({'series': Series(range(10))}).astype(float)\n df[df > 7] = None\n expected = DataFrame({'series': Series([0,1,2,3,4,5,6,7,np.nan,np.nan]) })\n assert_frame_equal(df, expected)\n\n # GH 7656\n df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {'A': np.nan, 'B': 'Test', 'C': np.nan}])\n expected = df.where(~isnull(df), None)\n with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):\n df.where(~isnull(df), None, inplace=True)\n\n def test_where_align(self):\n\n def create():\n df = DataFrame(np.random.randn(10,3))\n df.iloc[3:5,0] = np.nan\n df.iloc[4:6,1] = np.nan\n df.iloc[5:8,2] = np.nan\n return df\n\n # series\n df = create()\n expected = df.fillna(df.mean())\n result = df.where(pd.notnull(df),df.mean(),axis='columns')\n assert_frame_equal(result, expected)\n\n df.where(pd.notnull(df),df.mean(),inplace=True,axis='columns')\n assert_frame_equal(df, expected)\n\n df = create().fillna(0)\n expected = df.apply(lambda x, y: x.where(x>0,y), y=df[0])\n result = df.where(df>0,df[0],axis='index')\n assert_frame_equal(result, expected)\n result = df.where(df>0,df[0],axis='rows')\n assert_frame_equal(result, expected)\n\n # frame\n df = create()\n expected = df.fillna(1)\n result = df.where(pd.notnull(df),DataFrame(1,index=df.index,columns=df.columns))\n assert_frame_equal(result, expected)\n\n def test_where_complex(self):\n # GH 6345\n expected = DataFrame([[1+1j, 2], [np.nan, 4+1j]], columns=['a', 'b'])\n df = DataFrame([[1+1j, 2], [5+1j, 4+1j]], columns=['a', 'b'])\n df[df.abs() >= 5] = np.nan\n assert_frame_equal(df,expected)\n\n def test_mask(self):\n df = DataFrame(np.random.randn(5, 3))\n cond = df > 0\n\n rs = df.where(cond, np.nan)\n assert_frame_equal(rs, df.mask(df <= 0))\n assert_frame_equal(rs, df.mask(~cond))\n\n def test_mask_edge_case_1xN_frame(self):\n # GH4071\n df = DataFrame([[1, 2]])\n res = df.mask(DataFrame([[True, False]]))\n expec = DataFrame([[nan, 2]])\n assert_frame_equal(res, expec)\n\n #----------------------------------------------------------------------\n # Transposing\n\n def test_transpose(self):\n frame = self.frame\n dft = frame.T\n for idx, series in compat.iteritems(dft):\n for col, value in compat.iteritems(series):\n if np.isnan(value):\n self.assertTrue(np.isnan(frame[col][idx]))\n else:\n self.assertEqual(value, frame[col][idx])\n\n # mixed type\n index, data = tm.getMixedTypeDict()\n mixed = DataFrame(data, index=index)\n\n mixed_T = mixed.T\n for col, s in compat.iteritems(mixed_T):\n self.assertEqual(s.dtype, np.object_)\n\n def test_transpose_get_view(self):\n dft = self.frame.T\n dft.values[:, 5:10] = 5\n\n self.assertTrue((self.frame.values[5:10] == 5).all())\n\n #----------------------------------------------------------------------\n # Renaming\n\n def test_rename(self):\n mapping = {\n 'A': 'a',\n 'B': 'b',\n 'C': 'c',\n 'D': 'd'\n }\n\n renamed = self.frame.rename(columns=mapping)\n renamed2 = self.frame.rename(columns=str.lower)\n\n assert_frame_equal(renamed, renamed2)\n assert_frame_equal(renamed2.rename(columns=str.upper),\n self.frame, check_names=False)\n\n # index\n data = {\n 'A': {'foo': 0, 'bar': 1}\n }\n\n # gets sorted alphabetical\n df = DataFrame(data)\n renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'})\n self.assert_numpy_array_equal(renamed.index, ['foo', 'bar'])\n\n renamed = df.rename(index=str.upper)\n self.assert_numpy_array_equal(renamed.index, ['BAR', 'FOO'])\n\n # have 
to pass something\n self.assertRaises(TypeError, self.frame.rename)\n\n # partial columns\n renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'})\n self.assert_numpy_array_equal(renamed.columns, ['A', 'B', 'foo', 'bar'])\n\n # other axis\n renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'})\n self.assert_numpy_array_equal(renamed.index, ['A', 'B', 'foo', 'bar'])\n\n # index with name\n index = Index(['foo', 'bar'], name='name')\n renamer = DataFrame(data, index=index)\n renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'})\n self.assert_numpy_array_equal(renamed.index, ['bar', 'foo'])\n self.assertEqual(renamed.index.name, renamer.index.name)\n\n # MultiIndex\n tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')]\n tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')]\n index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar'])\n columns = MultiIndex.from_tuples(tuples_columns, names=['fizz', 'buzz'])\n renamer = DataFrame([(0,0),(1,1)], index=index, columns=columns)\n renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'},\n columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'})\n new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')])\n new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')])\n self.assert_numpy_array_equal(renamed.index, new_index)\n self.assert_numpy_array_equal(renamed.columns, new_columns)\n self.assertEqual(renamed.index.names, renamer.index.names)\n self.assertEqual(renamed.columns.names, renamer.columns.names)\n\n def test_rename_nocopy(self):\n renamed = self.frame.rename(columns={'C': 'foo'}, copy=False)\n renamed['foo'] = 1.\n self.assertTrue((self.frame['C'] == 1.).all())\n\n def test_rename_inplace(self):\n self.frame.rename(columns={'C': 'foo'})\n self.assertIn('C', self.frame)\n self.assertNotIn('foo', self.frame)\n\n c_id = id(self.frame['C'])\n frame = self.frame.copy()\n frame.rename(columns={'C': 'foo'}, inplace=True)\n\n self.assertNotIn('C', frame)\n self.assertIn('foo', frame)\n self.assertNotEqual(id(frame['foo']), c_id)\n\n def test_rename_bug(self):\n # GH 5344\n # rename set ref_locs, and set_index was not resetting\n df = DataFrame({ 0 : ['foo','bar'], 1 : ['bah','bas'], 2 : [1,2]})\n df = df.rename(columns={0 : 'a'})\n df = df.rename(columns={1 : 'b'})\n df = df.set_index(['a','b'])\n df.columns = ['2001-01-01']\n expected = DataFrame([[1],[2]],index=MultiIndex.from_tuples([('foo','bah'),('bar','bas')],\n names=['a','b']),\n columns=['2001-01-01'])\n assert_frame_equal(df,expected)\n\n #----------------------------------------------------------------------\n # Time series related\n def test_diff(self):\n the_diff = self.tsframe.diff(1)\n\n assert_series_equal(the_diff['A'],\n self.tsframe['A'] - self.tsframe['A'].shift(1))\n\n # int dtype\n a = 10000000000000000\n b = a + 1\n s = Series([a, b])\n\n rs = DataFrame({'s': s}).diff()\n self.assertEqual(rs.s[1], 1)\n\n # mixed numeric\n tf = self.tsframe.astype('float32')\n the_diff = tf.diff(1)\n assert_series_equal(the_diff['A'],\n tf['A'] - tf['A'].shift(1))\n\n def test_diff_mixed_dtype(self):\n df = DataFrame(np.random.randn(5, 3))\n df['A'] = np.array([1, 2, 3, 4, 5], dtype=object)\n\n result = df.diff()\n self.assertEqual(result[0].dtype, np.float64)\n\n def test_diff_neg_n(self):\n rs = self.tsframe.diff(-1)\n xp = self.tsframe - self.tsframe.shift(-1)\n assert_frame_equal(rs, xp)\n\n def test_diff_float_n(self):\n rs = self.tsframe.diff(1.)\n xp = self.tsframe.diff(1)\n assert_frame_equal(rs, xp)\n\n def 
test_pct_change(self):\n rs = self.tsframe.pct_change(fill_method=None)\n assert_frame_equal(rs, self.tsframe / self.tsframe.shift(1) - 1)\n\n rs = self.tsframe.pct_change(2)\n filled = self.tsframe.fillna(method='pad')\n assert_frame_equal(rs, filled / filled.shift(2) - 1)\n\n rs = self.tsframe.pct_change(fill_method='bfill', limit=1)\n filled = self.tsframe.fillna(method='bfill', limit=1)\n assert_frame_equal(rs, filled / filled.shift(1) - 1)\n\n rs = self.tsframe.pct_change(freq='5D')\n filled = self.tsframe.fillna(method='pad')\n assert_frame_equal(rs, filled / filled.shift(freq='5D') - 1)\n\n def test_pct_change_shift_over_nas(self):\n s = Series([1., 1.5, np.nan, 2.5, 3.])\n\n df = DataFrame({'a': s, 'b': s})\n\n chg = df.pct_change()\n expected = Series([np.nan, 0.5, np.nan, 2.5 / 1.5 - 1, .2])\n edf = DataFrame({'a': expected, 'b': expected})\n assert_frame_equal(chg, edf)\n\n def test_shift(self):\n # naive shift\n shiftedFrame = self.tsframe.shift(5)\n self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))\n\n shiftedSeries = self.tsframe['A'].shift(5)\n assert_series_equal(shiftedFrame['A'], shiftedSeries)\n\n shiftedFrame = self.tsframe.shift(-5)\n self.assertTrue(shiftedFrame.index.equals(self.tsframe.index))\n\n shiftedSeries = self.tsframe['A'].shift(-5)\n assert_series_equal(shiftedFrame['A'], shiftedSeries)\n\n # shift by 0\n unshifted = self.tsframe.shift(0)\n assert_frame_equal(unshifted, self.tsframe)\n\n # shift by DateOffset\n shiftedFrame = self.tsframe.shift(5, freq=datetools.BDay())\n self.assertEqual(len(shiftedFrame), len(self.tsframe))\n\n shiftedFrame2 = self.tsframe.shift(5, freq='B')\n assert_frame_equal(shiftedFrame, shiftedFrame2)\n\n d = self.tsframe.index[0]\n shifted_d = d + datetools.BDay(5)\n assert_series_equal(self.tsframe.xs(d),\n shiftedFrame.xs(shifted_d))\n\n # shift int frame\n int_shifted = self.intframe.shift(1)\n\n # Shifting with PeriodIndex\n ps = tm.makePeriodFrame()\n shifted = ps.shift(1)\n unshifted = shifted.shift(-1)\n self.assertTrue(shifted.index.equals(ps.index))\n\n tm.assert_dict_equal(unshifted.ix[:, 0].valid(), ps.ix[:, 0],\n compare_keys=False)\n\n shifted2 = ps.shift(1, 'B')\n shifted3 = ps.shift(1, datetools.bday)\n assert_frame_equal(shifted2, shifted3)\n assert_frame_equal(ps, shifted2.shift(-1, 'B'))\n\n assertRaisesRegexp(ValueError, 'does not match PeriodIndex freq',\n ps.shift, freq='D')\n\n\n # shift other axis\n # GH 6371\n df = DataFrame(np.random.rand(10,5))\n expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)\n result = df.shift(1,axis=1)\n assert_frame_equal(result,expected)\n\n # shift named axis\n df = DataFrame(np.random.rand(10,5))\n expected = pd.concat([DataFrame(np.nan,index=df.index,columns=[0]),df.iloc[:,0:-1]],ignore_index=True,axis=1)\n result = df.shift(1,axis='columns')\n assert_frame_equal(result,expected)\n\n def test_shift_bool(self):\n df = DataFrame({'high': [True, False],\n 'low': [False, False]})\n rs = df.shift(1)\n xp = DataFrame(np.array([[np.nan, np.nan],\n [True, False]], dtype=object),\n columns=['high', 'low'])\n assert_frame_equal(rs, xp)\n\n def test_tshift(self):\n # PeriodIndex\n ps = tm.makePeriodFrame()\n shifted = ps.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_frame_equal(unshifted, ps)\n\n shifted2 = ps.tshift(freq='B')\n assert_frame_equal(shifted, shifted2)\n\n shifted3 = ps.tshift(freq=datetools.bday)\n assert_frame_equal(shifted, shifted3)\n\n assertRaisesRegexp(ValueError, 'does not match', 
ps.tshift, freq='M')\n\n # DatetimeIndex\n shifted = self.tsframe.tshift(1)\n unshifted = shifted.tshift(-1)\n\n assert_frame_equal(self.tsframe, unshifted)\n\n shifted2 = self.tsframe.tshift(freq=self.tsframe.index.freq)\n assert_frame_equal(shifted, shifted2)\n\n inferred_ts = DataFrame(self.tsframe.values,\n Index(np.asarray(self.tsframe.index)),\n columns=self.tsframe.columns)\n shifted = inferred_ts.tshift(1)\n unshifted = shifted.tshift(-1)\n assert_frame_equal(shifted, self.tsframe.tshift(1))\n assert_frame_equal(unshifted, inferred_ts)\n\n no_freq = self.tsframe.ix[[0, 5, 7], :]\n self.assertRaises(ValueError, no_freq.tshift)\n\n def test_apply(self):\n # ufunc\n applied = self.frame.apply(np.sqrt)\n assert_series_equal(np.sqrt(self.frame['A']), applied['A'])\n\n # aggregator\n applied = self.frame.apply(np.mean)\n self.assertEqual(applied['A'], np.mean(self.frame['A']))\n\n d = self.frame.index[0]\n applied = self.frame.apply(np.mean, axis=1)\n self.assertEqual(applied[d], np.mean(self.frame.xs(d)))\n self.assertIs(applied.index, self.frame.index) # want this\n\n # invalid axis\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])\n self.assertRaises(ValueError, df.apply, lambda x: x, 2)\n\n def test_apply_empty(self):\n # empty\n applied = self.empty.apply(np.sqrt)\n self.assertTrue(applied.empty)\n\n applied = self.empty.apply(np.mean)\n self.assertTrue(applied.empty)\n\n no_rows = self.frame[:0]\n result = no_rows.apply(lambda x: x.mean())\n expected = Series(np.nan, index=self.frame.columns)\n assert_series_equal(result, expected)\n\n no_cols = self.frame.ix[:, []]\n result = no_cols.apply(lambda x: x.mean(), axis=1)\n expected = Series(np.nan, index=self.frame.index)\n assert_series_equal(result, expected)\n\n # 2476\n xp = DataFrame(index=['a'])\n rs = xp.apply(lambda x: x['a'], axis=1)\n assert_frame_equal(xp, rs)\n\n # reduce with an empty DataFrame\n x = []\n result = self.empty.apply(x.append, axis=1, reduce=False)\n assert_frame_equal(result, self.empty)\n result = self.empty.apply(x.append, axis=1, reduce=True)\n assert_series_equal(result, Series([]))\n\n empty_with_cols = DataFrame(columns=['a', 'b', 'c'])\n result = empty_with_cols.apply(x.append, axis=1, reduce=False)\n assert_frame_equal(result, empty_with_cols)\n result = empty_with_cols.apply(x.append, axis=1, reduce=True)\n assert_series_equal(result, Series([]))\n\n # Ensure that x.append hasn't been called\n self.assertEqual(x, [])\n\n def test_apply_standard_nonunique(self):\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])\n rs = df.apply(lambda s: s[0], axis=1)\n xp = Series([1, 4, 7], ['a', 'a', 'c'])\n assert_series_equal(rs, xp)\n\n rs = df.T.apply(lambda s: s[0], axis=0)\n assert_series_equal(rs, xp)\n\n def test_apply_broadcast(self):\n broadcasted = self.frame.apply(np.mean, broadcast=True)\n agged = self.frame.apply(np.mean)\n\n for col, ts in compat.iteritems(broadcasted):\n self.assertTrue((ts == agged[col]).all())\n\n broadcasted = self.frame.apply(np.mean, axis=1, broadcast=True)\n agged = self.frame.apply(np.mean, axis=1)\n for idx in broadcasted.index:\n self.assertTrue((broadcasted.xs(idx) == agged[idx]).all())\n\n def test_apply_raw(self):\n result0 = self.frame.apply(np.mean, raw=True)\n result1 = self.frame.apply(np.mean, axis=1, raw=True)\n\n expected0 = self.frame.apply(lambda x: x.values.mean())\n expected1 = self.frame.apply(lambda x: x.values.mean(), axis=1)\n\n assert_series_equal(result0, expected0)\n assert_series_equal(result1, 
expected1)\n\n # no reduction\n result = self.frame.apply(lambda x: x * 2, raw=True)\n expected = self.frame * 2\n assert_frame_equal(result, expected)\n\n def test_apply_axis1(self):\n d = self.frame.index[0]\n tapplied = self.frame.apply(np.mean, axis=1)\n self.assertEqual(tapplied[d], np.mean(self.frame.xs(d)))\n\n def test_apply_ignore_failures(self):\n result = self.mixed_frame._apply_standard(np.mean, 0,\n ignore_failures=True)\n expected = self.mixed_frame._get_numeric_data().apply(np.mean)\n assert_series_equal(result, expected)\n\n def test_apply_mixed_dtype_corner(self):\n df = DataFrame({'A': ['foo'],\n 'B': [1.]})\n result = df[:0].apply(np.mean, axis=1)\n # the result here is actually kind of ambiguous, should it be a Series\n # or a DataFrame?\n expected = Series(np.nan, index=[])\n assert_series_equal(result, expected)\n\n df = DataFrame({'A': ['foo'],\n 'B': [1.]})\n result = df.apply(lambda x: x['A'], axis=1)\n expected = Series(['foo'],index=[0])\n assert_series_equal(result, expected)\n\n result = df.apply(lambda x: x['B'], axis=1)\n expected = Series([1.],index=[0])\n assert_series_equal(result, expected)\n\n def test_apply_empty_infer_type(self):\n no_cols = DataFrame(index=['a', 'b', 'c'])\n no_index = DataFrame(columns=['a', 'b', 'c'])\n\n def _check(df, f):\n test_res = f(np.array([], dtype='f8'))\n is_reduction = not isinstance(test_res, np.ndarray)\n\n def _checkit(axis=0, raw=False):\n res = df.apply(f, axis=axis, raw=raw)\n if is_reduction:\n agg_axis = df._get_agg_axis(axis)\n tm.assert_isinstance(res, Series)\n self.assertIs(res.index, agg_axis)\n else:\n tm.assert_isinstance(res, DataFrame)\n\n _checkit()\n _checkit(axis=1)\n _checkit(raw=True)\n _checkit(axis=0, raw=True)\n\n _check(no_cols, lambda x: x)\n _check(no_cols, lambda x: x.mean())\n _check(no_index, lambda x: x)\n _check(no_index, lambda x: x.mean())\n\n result = no_cols.apply(lambda x: x.mean(), broadcast=True)\n tm.assert_isinstance(result, DataFrame)\n\n def test_apply_with_args_kwds(self):\n def add_some(x, howmuch=0):\n return x + howmuch\n\n def agg_and_add(x, howmuch=0):\n return x.mean() + howmuch\n\n def subtract_and_divide(x, sub, divide=1):\n return (x - sub) / divide\n\n result = self.frame.apply(add_some, howmuch=2)\n exp = self.frame.apply(lambda x: x + 2)\n assert_frame_equal(result, exp)\n\n result = self.frame.apply(agg_and_add, howmuch=2)\n exp = self.frame.apply(lambda x: x.mean() + 2)\n assert_series_equal(result, exp)\n\n res = self.frame.apply(subtract_and_divide, args=(2,), divide=2)\n exp = self.frame.apply(lambda x: (x - 2.) 
/ 2.)\n assert_frame_equal(res, exp)\n\n def test_apply_yield_list(self):\n result = self.frame.apply(list)\n assert_frame_equal(result, self.frame)\n\n def test_apply_reduce_Series(self):\n self.frame.ix[::2, 'A'] = np.nan\n expected = self.frame.mean(1)\n result = self.frame.apply(np.mean, axis=1)\n assert_series_equal(result, expected)\n\n def test_apply_differently_indexed(self):\n df = DataFrame(np.random.randn(20, 10))\n\n result0 = df.apply(Series.describe, axis=0)\n expected0 = DataFrame(dict((i, v.describe())\n for i, v in compat.iteritems(df)),\n columns=df.columns)\n assert_frame_equal(result0, expected0)\n\n result1 = df.apply(Series.describe, axis=1)\n expected1 = DataFrame(dict((i, v.describe())\n for i, v in compat.iteritems(df.T)),\n columns=df.index).T\n assert_frame_equal(result1, expected1)\n\n def test_apply_modify_traceback(self):\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n data['C'][4] = np.nan\n\n def transform(row):\n if row['C'].startswith('shin') and row['A'] == 'foo':\n row['D'] = 7\n return row\n\n def transform2(row):\n if (notnull(row['C']) and row['C'].startswith('shin')\n and row['A'] == 'foo'):\n row['D'] = 7\n return row\n\n try:\n transformed = data.apply(transform, axis=1)\n except AttributeError as e:\n self.assertEqual(len(e.args), 2)\n self.assertEqual(e.args[1], 'occurred at index 4')\n self.assertEqual(e.args[0], \"'float' object has no attribute 'startswith'\")\n\n def test_apply_bug(self):\n\n # GH 6125\n import datetime\n positions = pd.DataFrame([[1, 'ABC0', 50], [1, 'YUM0', 20],\n [1, 'DEF0', 20], [2, 'ABC1', 50],\n [2, 'YUM1', 20], [2, 'DEF1', 20]],\n columns=['a', 'market', 'position'])\n def f(r):\n return r['market']\n expected = positions.apply(f, axis=1)\n\n positions = DataFrame([[datetime.datetime(2013, 1, 1), 'ABC0', 50],\n [datetime.datetime(2013, 1, 2), 'YUM0', 20],\n [datetime.datetime(2013, 1, 3), 'DEF0', 20],\n [datetime.datetime(2013, 1, 4), 'ABC1', 50],\n [datetime.datetime(2013, 1, 5), 'YUM1', 20],\n [datetime.datetime(2013, 1, 6), 'DEF1', 20]],\n columns=['a', 'market', 'position'])\n result = positions.apply(f, axis=1)\n assert_series_equal(result,expected)\n\n def test_swapaxes(self):\n df = DataFrame(np.random.randn(10, 5))\n assert_frame_equal(df.T, df.swapaxes(0, 1))\n assert_frame_equal(df.T, df.swapaxes(1, 0))\n assert_frame_equal(df, df.swapaxes(0, 0))\n self.assertRaises(ValueError, df.swapaxes, 2, 5)\n\n def test_apply_convert_objects(self):\n data = DataFrame({'A': ['foo', 'foo', 'foo', 'foo',\n 'bar', 'bar', 'bar', 'bar',\n 'foo', 'foo', 'foo'],\n 'B': ['one', 'one', 'one', 'two',\n 'one', 'one', 'one', 'two',\n 'two', 'two', 'one'],\n 'C': ['dull', 'dull', 'shiny', 'dull',\n 'dull', 'shiny', 'shiny', 'dull',\n 'shiny', 'shiny', 'shiny'],\n 'D': np.random.randn(11),\n 'E': np.random.randn(11),\n 'F': np.random.randn(11)})\n\n result = data.apply(lambda x: x, axis=1)\n assert_frame_equal(result.convert_objects(), data)\n\n def test_apply_attach_name(self):\n result = self.frame.apply(lambda x: x.name)\n expected = Series(self.frame.columns, index=self.frame.columns)\n assert_series_equal(result, expected)\n\n result = self.frame.apply(lambda x: x.name, axis=1)\n expected = 
Series(self.frame.index, index=self.frame.index)\n assert_series_equal(result, expected)\n\n # non-reductions\n result = self.frame.apply(lambda x: np.repeat(x.name, len(x)))\n expected = DataFrame(np.tile(self.frame.columns,\n (len(self.frame.index), 1)),\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(result, expected)\n\n result = self.frame.apply(lambda x: np.repeat(x.name, len(x)),\n axis=1)\n expected = DataFrame(np.tile(self.frame.index,\n (len(self.frame.columns), 1)).T,\n index=self.frame.index,\n columns=self.frame.columns)\n assert_frame_equal(result, expected)\n\n def test_apply_multi_index(self):\n s = DataFrame([[1,2], [3,4], [5,6]])\n s.index = MultiIndex.from_arrays([['a','a','b'], ['c','d','d']])\n s.columns = ['col1','col2']\n res = s.apply(lambda x: Series({'min': min(x), 'max': max(x)}), 1)\n tm.assert_isinstance(res.index, MultiIndex)\n\n def test_applymap(self):\n applied = self.frame.applymap(lambda x: x * 2)\n assert_frame_equal(applied, self.frame * 2)\n result = self.frame.applymap(type)\n\n # GH #465, function returning tuples\n result = self.frame.applymap(lambda x: (x, x))\n tm.assert_isinstance(result['A'][0], tuple)\n\n # GH 2909, object conversion to float in constructor?\n df = DataFrame(data=[1,'a'])\n result = df.applymap(lambda x: x)\n self.assertEqual(result.dtypes[0], object)\n\n df = DataFrame(data=[1.,'a'])\n result = df.applymap(lambda x: x)\n self.assertEqual(result.dtypes[0], object)\n\n # GH2786\n df = DataFrame(np.random.random((3,4)))\n df2 = df.copy()\n cols = ['a','a','a','a']\n df.columns = cols\n\n expected = df2.applymap(str)\n expected.columns = cols\n result = df.applymap(str)\n assert_frame_equal(result,expected)\n\n def test_filter(self):\n # items\n filtered = self.frame.filter(['A', 'B', 'E'])\n self.assertEqual(len(filtered.columns), 2)\n self.assertNotIn('E', filtered)\n\n filtered = self.frame.filter(['A', 'B', 'E'], axis='columns')\n self.assertEqual(len(filtered.columns), 2)\n self.assertNotIn('E', filtered)\n\n # other axis\n idx = self.frame.index[0:4]\n filtered = self.frame.filter(idx, axis='index')\n expected = self.frame.reindex(index=idx)\n assert_frame_equal(filtered,expected)\n\n # like\n fcopy = self.frame.copy()\n fcopy['AA'] = 1\n\n filtered = fcopy.filter(like='A')\n self.assertEqual(len(filtered.columns), 2)\n self.assertIn('AA', filtered)\n\n # like with ints in column names\n df = DataFrame(0., index=[0, 1, 2], columns=[0, 1, '_A', '_B'])\n filtered = df.filter(like='_')\n self.assertEqual(len(filtered.columns), 2)\n\n # pass in None\n with assertRaisesRegexp(TypeError, 'Must pass'):\n self.frame.filter(items=None)\n\n # objects\n filtered = self.mixed_frame.filter(like='foo')\n self.assertIn('foo', filtered)\n\n # unicode columns, won't ascii-encode\n df = self.frame.rename(columns={'B': u('\\u2202')})\n filtered = df.filter(like='C')\n self.assertTrue('C' in filtered)\n\n def test_filter_regex_search(self):\n fcopy = self.frame.copy()\n fcopy['AA'] = 1\n\n # regex\n filtered = fcopy.filter(regex='[A]+')\n self.assertEqual(len(filtered.columns), 2)\n self.assertIn('AA', filtered)\n\n # doesn't have to be at beginning\n df = DataFrame({'aBBa': [1, 2],\n 'BBaBB': [1, 2],\n 'aCCa': [1, 2],\n 'aCCaBB': [1, 2]})\n\n result = df.filter(regex='BB')\n exp = df[[x for x in df.columns if 'BB' in x]]\n assert_frame_equal(result, exp)\n\n def test_filter_corner(self):\n empty = DataFrame()\n\n result = empty.filter([])\n assert_frame_equal(result, empty)\n\n result = empty.filter(like='foo')\n 
assert_frame_equal(result, empty)\n\n def test_select(self):\n f = lambda x: x.weekday() == 2\n result = self.tsframe.select(f, axis=0)\n expected = self.tsframe.reindex(\n index=self.tsframe.index[[f(x) for x in self.tsframe.index]])\n assert_frame_equal(result, expected)\n\n result = self.frame.select(lambda x: x in ('B', 'D'), axis=1)\n expected = self.frame.reindex(columns=['B', 'D'])\n\n assert_frame_equal(result, expected, check_names=False) # TODO should reindex check_names?\n\n def test_reorder_levels(self):\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]],\n names=['L0', 'L1', 'L2'])\n df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index)\n\n # no change, position\n result = df.reorder_levels([0, 1, 2])\n assert_frame_equal(df, result)\n\n # no change, labels\n result = df.reorder_levels(['L0', 'L1', 'L2'])\n assert_frame_equal(df, result)\n\n # rotate, position\n result = df.reorder_levels([1, 2, 0])\n e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']],\n labels=[[0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1],\n [0, 0, 0, 0, 0, 0]],\n names=['L1', 'L2', 'L0'])\n expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},\n index=e_idx)\n assert_frame_equal(result, expected)\n\n result = df.reorder_levels([0, 0, 0])\n e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0]],\n names=['L0', 'L0', 'L0'])\n expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)},\n index=e_idx)\n assert_frame_equal(result, expected)\n\n result = df.reorder_levels(['L0', 'L0', 'L0'])\n assert_frame_equal(result, expected)\n\n def test_sort_index(self):\n frame = DataFrame(np.arange(16).reshape(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n # axis=0\n unordered = frame.ix[[3, 2, 4, 1]]\n sorted_df = unordered.sort_index()\n expected = frame\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = unordered.sort_index(ascending=False)\n expected = frame[::-1]\n assert_frame_equal(sorted_df, expected)\n\n # axis=1\n unordered = frame.ix[:, ['D', 'B', 'C', 'A']]\n sorted_df = unordered.sort_index(axis=1)\n expected = frame\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = unordered.sort_index(axis=1, ascending=False)\n expected = frame.ix[:, ::-1]\n assert_frame_equal(sorted_df, expected)\n\n # by column\n sorted_df = frame.sort_index(by='A')\n indexer = frame['A'].argsort().values\n expected = frame.ix[frame.index[indexer]]\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.sort_index(by='A', ascending=False)\n indexer = indexer[::-1]\n expected = frame.ix[frame.index[indexer]]\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.sort(columns='A', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n # GH4839\n sorted_df = frame.sort(columns=['A'], ascending=[False])\n assert_frame_equal(sorted_df, expected)\n\n # check for now\n sorted_df = frame.sort(columns='A')\n assert_frame_equal(sorted_df, expected[::-1])\n expected = frame.sort_index(by='A')\n assert_frame_equal(sorted_df, expected)\n\n\n sorted_df = frame.sort(columns=['A', 'B'], ascending=False)\n expected = frame.sort_index(by=['A', 'B'], ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.sort(columns=['A', 'B'])\n assert_frame_equal(sorted_df, expected[::-1])\n\n self.assertRaises(ValueError, frame.sort_index, axis=2, inplace=True)\n\n msg = 'When sorting by 
column, axis must be 0'\n with assertRaisesRegexp(ValueError, msg):\n frame.sort_index(by='A', axis=1)\n\n msg = r'Length of ascending \\(5\\) != length of by \\(2\\)'\n with assertRaisesRegexp(ValueError, msg):\n frame.sort_index(by=['A', 'B'], axis=0, ascending=[True] * 5)\n\n def test_sort_nan(self):\n # GH3917\n nan = np.nan\n df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]})\n\n # sort one column only\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 9, 2, nan, 5, 5, 4]},\n index=[2, 0, 3, 1, 6, 4, 5])\n sorted_df = df.sort(['A'], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 9, 2]},\n index=[2, 5, 4, 6, 1, 0, 3])\n sorted_df = df.sort(['A'], na_position='first', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n # na_position='last', order\n expected = DataFrame(\n {'A': [1, 1, 2, 4, 6, 8, nan],\n 'B': [2, 9, nan, 5, 5, 4, 5]},\n index=[3, 0, 1, 6, 4, 5, 2])\n sorted_df = df.sort(['A','B'])\n assert_frame_equal(sorted_df, expected)\n\n # na_position='first', order\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 2, 9, nan, 5, 5, 4]},\n index=[2, 3, 0, 1, 6, 4, 5])\n sorted_df = df.sort(['A','B'], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n # na_position='first', not order\n expected = DataFrame(\n {'A': [nan, 1, 1, 2, 4, 6, 8],\n 'B': [5, 9, 2, nan, 5, 5, 4]},\n index=[2, 0, 3, 1, 6, 4, 5])\n sorted_df = df.sort(['A','B'], ascending=[1,0], na_position='first')\n assert_frame_equal(sorted_df, expected)\n\n # na_position='last', not order\n expected = DataFrame(\n {'A': [8, 6, 4, 2, 1, 1, nan],\n 'B': [4, 5, 5, nan, 2, 9, 5]},\n index=[5, 4, 6, 1, 3, 0, 2])\n sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='last')\n assert_frame_equal(sorted_df, expected)\n\n # Test DataFrame with nan label\n df = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]},\n index = [1, 2, 3, 4, 5, 6, nan])\n\n # NaN label, ascending=True, na_position='last'\n sorted_df = df.sort(kind='quicksort', ascending=True, na_position='last')\n expected = DataFrame({'A': [1, 2, nan, 1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]},\n index = [1, 2, 3, 4, 5, 6, nan])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=True, na_position='first'\n sorted_df = df.sort(na_position='first')\n expected = DataFrame({'A': [4, 1, 2, nan, 1, 6, 8],\n 'B': [5, 9, nan, 5, 2, 5, 4]},\n index = [nan, 1, 2, 3, 4, 5, 6])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=False, na_position='last'\n sorted_df = df.sort(kind='quicksort', ascending=False)\n expected = DataFrame({'A': [8, 6, 1, nan, 2, 1, 4],\n 'B': [4, 5, 2, 5, nan, 9, 5]},\n index = [6, 5, 4, 3, 2, 1, nan])\n assert_frame_equal(sorted_df, expected)\n\n # NaN label, ascending=False, na_position='first'\n sorted_df = df.sort(kind='quicksort', ascending=False, na_position='first')\n expected = DataFrame({'A': [4, 8, 6, 1, nan, 2, 1],\n 'B': [5, 4, 5, 2, 5, nan, 9]},\n index = [nan, 6, 5, 4, 3, 2, 1])\n assert_frame_equal(sorted_df, expected)\n\n def test_stable_descending_sort(self):\n # GH #6399\n df = DataFrame([[2, 'first'], [2, 'second'], [1, 'a'], [1, 'b']],\n columns=['sort_col', 'order'])\n sorted_df = df.sort_index(by='sort_col', kind='mergesort',\n ascending=False)\n assert_frame_equal(df, sorted_df)\n\n def test_stable_descending_multicolumn_sort(self):\n nan = np.nan\n df = DataFrame({'A': [1, 2, nan, 
1, 6, 8, 4],\n 'B': [9, nan, 5, 2, 5, 4, 5]})\n # test stable mergesort\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 2, 9]},\n index=[2, 5, 4, 6, 1, 3, 0])\n sorted_df = df.sort(['A','B'], ascending=[0,1], na_position='first',\n kind='mergesort')\n assert_frame_equal(sorted_df, expected)\n\n expected = DataFrame(\n {'A': [nan, 8, 6, 4, 2, 1, 1],\n 'B': [5, 4, 5, 5, nan, 9, 2]},\n index=[2, 5, 4, 6, 1, 0, 3])\n sorted_df = df.sort(['A','B'], ascending=[0,0], na_position='first',\n kind='mergesort')\n assert_frame_equal(sorted_df, expected)\n\n def test_sort_index_multicolumn(self):\n import random\n A = np.arange(5).repeat(20)\n B = np.tile(np.arange(5), 20)\n random.shuffle(A)\n random.shuffle(B)\n frame = DataFrame({'A': A, 'B': B,\n 'C': np.random.randn(100)})\n\n result = frame.sort_index(by=['A', 'B'])\n indexer = np.lexsort((frame['B'], frame['A']))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n result = frame.sort_index(by=['A', 'B'], ascending=False)\n indexer = np.lexsort((frame['B'].rank(ascending=False),\n frame['A'].rank(ascending=False)))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n result = frame.sort_index(by=['B', 'A'])\n indexer = np.lexsort((frame['A'], frame['B']))\n expected = frame.take(indexer)\n assert_frame_equal(result, expected)\n\n def test_sort_index_inplace(self):\n frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n # axis=0\n unordered = frame.ix[[3, 2, 4, 1]]\n a_id = id(unordered['A'])\n df = unordered.copy()\n df.sort_index(inplace=True)\n expected = frame\n assert_frame_equal(df, expected)\n self.assertNotEqual(a_id, id(df['A']))\n\n df = unordered.copy()\n df.sort_index(ascending=False, inplace=True)\n expected = frame[::-1]\n assert_frame_equal(df, expected)\n\n # axis=1\n unordered = frame.ix[:, ['D', 'B', 'C', 'A']]\n df = unordered.copy()\n df.sort_index(axis=1, inplace=True)\n expected = frame\n assert_frame_equal(df, expected)\n\n df = unordered.copy()\n df.sort_index(axis=1, ascending=False, inplace=True)\n expected = frame.ix[:, ::-1]\n assert_frame_equal(df, expected)\n\n def test_sort_index_different_sortorder(self):\n import random\n A = np.arange(20).repeat(5)\n B = np.tile(np.arange(5), 20)\n\n indexer = np.random.permutation(100)\n A = A.take(indexer)\n B = B.take(indexer)\n\n df = DataFrame({'A': A, 'B': B,\n 'C': np.random.randn(100)})\n\n result = df.sort_index(by=['A', 'B'], ascending=[1, 0])\n\n ex_indexer = np.lexsort((df.B.max() - df.B, df.A))\n expected = df.take(ex_indexer)\n assert_frame_equal(result, expected)\n\n # test with multiindex, too\n idf = df.set_index(['A', 'B'])\n\n result = idf.sort_index(ascending=[1, 0])\n expected = idf.take(ex_indexer)\n assert_frame_equal(result, expected)\n\n # also, Series!\n result = idf['C'].sort_index(ascending=[1, 0])\n assert_series_equal(result, expected['C'])\n\n def test_sort_inplace(self):\n frame = DataFrame(np.random.randn(4, 4), index=[1, 2, 3, 4],\n columns=['A', 'B', 'C', 'D'])\n\n sorted_df = frame.copy()\n sorted_df.sort(columns='A', inplace=True)\n expected = frame.sort_index(by='A')\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.copy()\n sorted_df.sort(columns='A', ascending=False, inplace=True)\n expected = frame.sort_index(by='A', ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n sorted_df = frame.copy()\n sorted_df.sort(columns=['A', 'B'], ascending=False, inplace=True)\n expected = 
frame.sort_index(by=['A', 'B'], ascending=False)\n assert_frame_equal(sorted_df, expected)\n\n def test_sort_index_duplicates(self):\n df = DataFrame([lrange(5,9), lrange(4)],\n columns=['a', 'a', 'b', 'b'])\n\n with assertRaisesRegexp(ValueError, 'duplicate'):\n df.sort_index(by='a')\n with assertRaisesRegexp(ValueError, 'duplicate'):\n df.sort_index(by=['a'])\n with assertRaisesRegexp(ValueError, 'duplicate'):\n # multi-column 'by' is separate codepath\n df.sort_index(by=['a', 'b'])\n\n # with multi-index\n # GH4370\n df = DataFrame(np.random.randn(4,2),columns=MultiIndex.from_tuples([('a',0),('a',1)]))\n with assertRaisesRegexp(ValueError, 'levels'):\n df.sort_index(by='a')\n\n # convert tuples to a list of tuples\n expected = df.sort_index(by=[('a',1)])\n result = df.sort_index(by=('a',1))\n assert_frame_equal(result, expected)\n\n def test_sortlevel(self):\n mi = MultiIndex.from_tuples([[1, 1, 3], [1, 1, 1]], names=list('ABC'))\n df = DataFrame([[1, 2], [3, 4]], mi)\n res = df.sortlevel('A', sort_remaining=False)\n assert_frame_equal(df, res)\n\n res = df.sortlevel(['A', 'B'], sort_remaining=False)\n assert_frame_equal(df, res)\n\n def test_sort_datetimes(self):\n\n # GH 3461, argsort / lexsort differences for a datetime column\n df = DataFrame(['a','a','a','b','c','d','e','f','g'],\n columns=['A'],\n index=date_range('20130101',periods=9))\n dts = [Timestamp(x)\n for x in ['2004-02-11','2004-01-21','2004-01-26',\n '2005-09-20','2010-10-04','2009-05-12',\n '2008-11-12','2010-09-28','2010-09-28']]\n df['B'] = dts[::2] + dts[1::2]\n df['C'] = 2.\n df['A1'] = 3.\n\n df1 = df.sort(columns='A')\n df2 = df.sort(columns=['A'])\n assert_frame_equal(df1,df2)\n\n df1 = df.sort(columns='B')\n df2 = df.sort(columns=['B'])\n assert_frame_equal(df1,df2)\n\n def test_frame_column_inplace_sort_exception(self):\n s = self.frame['A']\n with assertRaisesRegexp(ValueError, \"This Series is a view\"):\n s.sort()\n\n cp = s.copy()\n cp.sort() # it works!\n\n def test_combine_first(self):\n # disjoint\n head, tail = self.frame[:5], self.frame[5:]\n\n combined = head.combine_first(tail)\n reordered_frame = self.frame.reindex(combined.index)\n assert_frame_equal(combined, reordered_frame)\n self.assertTrue(tm.equalContents(combined.columns, self.frame.columns))\n assert_series_equal(combined['A'], reordered_frame['A'])\n\n # same index\n fcopy = self.frame.copy()\n fcopy['A'] = 1\n del fcopy['C']\n\n fcopy2 = self.frame.copy()\n fcopy2['B'] = 0\n del fcopy2['D']\n\n combined = fcopy.combine_first(fcopy2)\n\n self.assertTrue((combined['A'] == 1).all())\n assert_series_equal(combined['B'], fcopy['B'])\n assert_series_equal(combined['C'], fcopy2['C'])\n assert_series_equal(combined['D'], fcopy['D'])\n\n # overlap\n head, tail = reordered_frame[:10].copy(), reordered_frame\n head['A'] = 1\n\n combined = head.combine_first(tail)\n self.assertTrue((combined['A'][:10] == 1).all())\n\n # reverse overlap\n tail['A'][:10] = 0\n combined = tail.combine_first(head)\n self.assertTrue((combined['A'][:10] == 0).all())\n\n # no overlap\n f = self.frame[:10]\n g = self.frame[10:]\n combined = f.combine_first(g)\n assert_series_equal(combined['A'].reindex(f.index), f['A'])\n assert_series_equal(combined['A'].reindex(g.index), g['A'])\n\n # corner cases\n comb = self.frame.combine_first(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combine_first(self.frame)\n assert_frame_equal(comb, self.frame)\n\n comb = self.frame.combine_first(DataFrame(index=[\"faz\", \"boo\"]))\n self.assertTrue(\"faz\" in 
comb.index)\n\n # #2525\n df = DataFrame({'a': [1]}, index=[datetime(2012, 1, 1)])\n df2 = DataFrame({}, columns=['b'])\n result = df.combine_first(df2)\n self.assertTrue('b' in result)\n\n def test_combine_first_mixed_bug(self):\n idx = Index(['a', 'b', 'c', 'e'])\n ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)\n ser2 = Series(['a', 'b', 'c', 'e'], index=idx)\n ser3 = Series([12, 4, 5, 97], index=idx)\n\n frame1 = DataFrame({\"col0\": ser1,\n \"col2\": ser2,\n \"col3\": ser3})\n\n idx = Index(['a', 'b', 'c', 'f'])\n ser1 = Series([5.0, -9.0, 4.0, 100.], index=idx)\n ser2 = Series(['a', 'b', 'c', 'f'], index=idx)\n ser3 = Series([12, 4, 5, 97], index=idx)\n\n frame2 = DataFrame({\"col1\": ser1,\n \"col2\": ser2,\n \"col5\": ser3})\n\n combined = frame1.combine_first(frame2)\n self.assertEqual(len(combined.columns), 5)\n\n # gh 3016 (same as in update)\n df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n\n other = DataFrame([[45,45]],index=[0],columns=['A','B'])\n result = df.combine_first(other)\n assert_frame_equal(result, df)\n\n df.ix[0,'A'] = np.nan\n result = df.combine_first(other)\n df.ix[0,'A'] = 45\n assert_frame_equal(result, df)\n\n # doc example\n df1 = DataFrame({'A' : [1., np.nan, 3., 5., np.nan],\n 'B' : [np.nan, 2., 3., np.nan, 6.]})\n\n df2 = DataFrame({'A' : [5., 2., 4., np.nan, 3., 7.],\n 'B' : [np.nan, np.nan, 3., 4., 6., 8.]})\n\n result = df1.combine_first(df2)\n expected = DataFrame({ 'A' : [1,2,3,5,3,7.], 'B' : [np.nan,2,3,4,6,8] })\n assert_frame_equal(result,expected)\n\n # GH3552, return object dtype with bools\n df1 = DataFrame([[np.nan, 3.,True], [-4.6, np.nan, True], [np.nan, 7., False]])\n df2 = DataFrame([[-42.6, np.nan, True], [-5., 1.6, False]], index=[1, 2])\n\n result = df1.combine_first(df2)[2]\n expected = Series([True,True,False])\n assert_series_equal(result,expected)\n\n # GH 3593, converting datetime64[ns] incorrecly\n df0 = DataFrame({\"a\":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})\n df1 = DataFrame({\"a\":[None, None, None]})\n df2 = df1.combine_first(df0)\n assert_frame_equal(df2,df0)\n\n df2 = df0.combine_first(df1)\n assert_frame_equal(df2,df0)\n\n df0 = DataFrame({\"a\":[datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]})\n df1 = DataFrame({\"a\":[datetime(2000, 1, 2), None, None]})\n df2 = df1.combine_first(df0)\n result = df0.copy()\n result.iloc[0,:] = df1.iloc[0,:]\n assert_frame_equal(df2,result)\n\n df2 = df0.combine_first(df1)\n assert_frame_equal(df2,df0)\n\n def test_update(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other)\n\n expected = DataFrame([[1.5, nan, 3],\n [3.6, 2, 3],\n [1.5, nan, 3],\n [1.5, nan, 7.]])\n assert_frame_equal(df, expected)\n\n def test_update_dtypes(self):\n\n # gh 3016\n df = DataFrame([[1.,2.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n\n other = DataFrame([[45,45]],index=[0],columns=['A','B'])\n df.update(other)\n\n expected = DataFrame([[45.,45.,False, True],[4.,5.,True,False]],\n columns=['A','B','bool1','bool2'])\n assert_frame_equal(df, expected)\n\n def test_update_nooverwrite(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other, overwrite=False)\n\n expected = DataFrame([[1.5, nan, 3],\n [1.5, 2, 3],\n 
[1.5, nan, 3],\n [1.5, nan, 3.]])\n assert_frame_equal(df, expected)\n\n def test_update_filtered(self):\n df = DataFrame([[1.5, nan, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[3.6, 2., np.nan],\n [np.nan, np.nan, 7]], index=[1, 3])\n\n df.update(other, filter_func=lambda x: x > 2)\n\n expected = DataFrame([[1.5, nan, 3],\n [1.5, nan, 3],\n [1.5, nan, 3],\n [1.5, nan, 7.]])\n assert_frame_equal(df, expected)\n\n def test_update_raise(self):\n df = DataFrame([[1.5, 1, 3.],\n [1.5, nan, 3.],\n [1.5, nan, 3],\n [1.5, nan, 3]])\n\n other = DataFrame([[2., nan],\n [nan, 7]], index=[1, 3], columns=[1, 2])\n with assertRaisesRegexp(ValueError, \"Data overlaps\"):\n df.update(other, raise_conflict=True)\n\n def test_update_from_non_df(self):\n d = {'a': Series([1, 2, 3, 4]), 'b': Series([5, 6, 7, 8])}\n df = DataFrame(d)\n\n d['a'] = Series([5, 6, 7, 8])\n df.update(d)\n\n expected = DataFrame(d)\n\n assert_frame_equal(df, expected)\n\n d = {'a': [1, 2, 3, 4], 'b': [5, 6, 7, 8]}\n df = DataFrame(d)\n\n d['a'] = [5, 6, 7, 8]\n df.update(d)\n\n expected = DataFrame(d)\n\n assert_frame_equal(df, expected)\n\n def test_combineAdd(self):\n # trivial\n comb = self.frame.combineAdd(self.frame)\n assert_frame_equal(comb, self.frame * 2)\n\n # more rigorous\n a = DataFrame([[1., nan, nan, 2., nan]],\n columns=np.arange(5))\n b = DataFrame([[2., 3., nan, 2., 6., nan]],\n columns=np.arange(6))\n expected = DataFrame([[3., 3., nan, 4., 6., nan]],\n columns=np.arange(6))\n\n result = a.combineAdd(b)\n assert_frame_equal(result, expected)\n result2 = a.T.combineAdd(b.T)\n assert_frame_equal(result2, expected.T)\n\n expected2 = a.combine(b, operator.add, fill_value=0.)\n assert_frame_equal(expected, expected2)\n\n # corner cases\n comb = self.frame.combineAdd(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combineAdd(self.frame)\n assert_frame_equal(comb, self.frame)\n\n # integer corner case\n df1 = DataFrame({'x': [5]})\n df2 = DataFrame({'x': [1]})\n df3 = DataFrame({'x': [6]})\n comb = df1.combineAdd(df2)\n assert_frame_equal(comb, df3)\n\n # mixed type GH2191\n df1 = DataFrame({'A': [1, 2], 'B': [3, 4]})\n df2 = DataFrame({'A': [1, 2], 'C': [5, 6]})\n rs = df1.combineAdd(df2)\n xp = DataFrame({'A': [2, 4], 'B': [3, 4.], 'C': [5, 6.]})\n assert_frame_equal(xp, rs)\n\n # TODO: test integer fill corner?\n\n def test_combineMult(self):\n # trivial\n comb = self.frame.combineMult(self.frame)\n\n assert_frame_equal(comb, self.frame ** 2)\n\n # corner cases\n comb = self.frame.combineMult(self.empty)\n assert_frame_equal(comb, self.frame)\n\n comb = self.empty.combineMult(self.frame)\n assert_frame_equal(comb, self.frame)\n\n def test_combine_generic(self):\n df1 = self.frame\n df2 = self.frame.ix[:-5, ['A', 'B', 'C']]\n\n combined = df1.combine(df2, np.add)\n combined2 = df2.combine(df1, np.add)\n self.assertTrue(combined['D'].isnull().all())\n self.assertTrue(combined2['D'].isnull().all())\n\n chunk = combined.ix[:-5, ['A', 'B', 'C']]\n chunk2 = combined2.ix[:-5, ['A', 'B', 'C']]\n\n exp = self.frame.ix[:-5, ['A', 'B', 'C']].reindex_like(chunk) * 2\n assert_frame_equal(chunk, exp)\n assert_frame_equal(chunk2, exp)\n\n def test_clip(self):\n median = self.frame.median().median()\n\n capped = self.frame.clip_upper(median)\n self.assertFalse((capped.values > median).any())\n\n floored = self.frame.clip_lower(median)\n self.assertFalse((floored.values < median).any())\n\n double = self.frame.clip(upper=median, lower=median)\n 
self.assertFalse((double.values != median).any())\n\n def test_dataframe_clip(self):\n\n # GH #2747\n df = DataFrame(np.random.randn(1000,2))\n\n for lb, ub in [(-1,1),(1,-1)]:\n clipped_df = df.clip(lb, ub)\n\n lb, ub = min(lb,ub), max(ub,lb)\n lb_mask = df.values <= lb\n ub_mask = df.values >= ub\n mask = ~lb_mask & ~ub_mask\n self.assertTrue((clipped_df.values[lb_mask] == lb).all() == True)\n self.assertTrue((clipped_df.values[ub_mask] == ub).all() == True)\n self.assertTrue((clipped_df.values[mask] == df.values[mask]).all() == True)\n\n def test_get_X_columns(self):\n # numeric and object columns\n\n df = DataFrame({'a': [1, 2, 3],\n 'b' : [True, False, True],\n 'c': ['foo', 'bar', 'baz'],\n 'd': [None, None, None],\n 'e': [3.14, 0.577, 2.773]})\n\n self.assert_numpy_array_equal(df._get_numeric_data().columns,\n ['a', 'b', 'e'])\n\n def test_is_mixed_type(self):\n self.assertFalse(self.frame._is_mixed_type)\n self.assertTrue(self.mixed_frame._is_mixed_type)\n\n def test_get_numeric_data(self):\n intname = np.dtype(np.int_).name\n floatname = np.dtype(np.float_).name\n datetime64name = np.dtype('M8[ns]').name\n objectname = np.dtype(np.object_).name\n\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo', 'f' : Timestamp('20010102')},\n index=np.arange(10))\n result = df.get_dtype_counts()\n expected = Series({'int64': 1, 'float64' : 1, datetime64name: 1, objectname : 1})\n result.sort_index()\n expected.sort_index()\n assert_series_equal(result, expected)\n\n df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',\n 'd' : np.array([1.]*10,dtype='float32'),\n 'e' : np.array([1]*10,dtype='int32'),\n 'f' : np.array([1]*10,dtype='int16'),\n 'g' : Timestamp('20010102')},\n index=np.arange(10))\n\n result = df._get_numeric_data()\n expected = df.ix[:, ['a', 'b','d','e','f']]\n assert_frame_equal(result, expected)\n\n only_obj = df.ix[:, ['c','g']]\n result = only_obj._get_numeric_data()\n expected = df.ix[:, []]\n assert_frame_equal(result, expected)\n\n df = DataFrame.from_dict({'a':[1,2], 'b':['foo','bar'],'c':[np.pi,np.e]})\n result = df._get_numeric_data()\n expected = DataFrame.from_dict({'a':[1,2], 'c':[np.pi,np.e]})\n assert_frame_equal(result, expected)\n\n df = result.copy()\n result = df._get_numeric_data()\n expected = df\n assert_frame_equal(result, expected)\n\n def test_bool_describe_in_mixed_frame(self):\n df = DataFrame({\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n })\n\n # Boolean data and integer data is included in .describe() output, string data isn't\n self.assert_numpy_array_equal(df.describe().columns, ['bool_data', 'int_data'])\n\n bool_describe = df.describe()['bool_data']\n\n # Both the min and the max values should stay booleans\n self.assertEqual(bool_describe['min'].dtype, np.bool_)\n self.assertEqual(bool_describe['max'].dtype, np.bool_)\n\n self.assertFalse(bool_describe['min'])\n self.assertTrue(bool_describe['max'])\n\n # For numeric operations, like mean or median, the values True/False are cast to\n # the integer values 1 and 0\n assert_almost_equal(bool_describe['mean'], 0.4)\n assert_almost_equal(bool_describe['50%'], 0)\n\n def test_reduce_mixed_frame(self):\n # GH 6806\n df = DataFrame({\n 'bool_data': [True, True, False, False, False],\n 'int_data': [10, 20, 30, 40, 50],\n 'string_data': ['a', 'b', 'c', 'd', 'e'],\n })\n df.reindex(columns=['bool_data', 'int_data', 'string_data'])\n test = df.sum(axis=0)\n assert_almost_equal(test.values, [2, 150, 'abcde'])\n assert_series_equal(test, 
df.T.sum(axis=1))\n\n def test_count(self):\n f = lambda s: notnull(s).sum()\n self._check_stat_op('count', f,\n has_skipna=False,\n has_numeric_only=True,\n check_dtype=False,\n check_dates=True)\n\n # corner case\n frame = DataFrame()\n ct1 = frame.count(1)\n tm.assert_isinstance(ct1, Series)\n\n ct2 = frame.count(0)\n tm.assert_isinstance(ct2, Series)\n\n # GH #423\n df = DataFrame(index=lrange(10))\n result = df.count(1)\n expected = Series(0, index=df.index)\n assert_series_equal(result, expected)\n\n df = DataFrame(columns=lrange(10))\n result = df.count(0)\n expected = Series(0, index=df.columns)\n assert_series_equal(result, expected)\n\n df = DataFrame()\n result = df.count()\n expected = Series(0, index=[])\n assert_series_equal(result, expected)\n\n def test_sum(self):\n self._check_stat_op('sum', np.sum, has_numeric_only=True)\n\n # mixed types (with upcasting happening)\n self._check_stat_op('sum', np.sum, frame=self.mixed_float.astype('float32'),\n has_numeric_only=True, check_dtype=False, check_less_precise=True)\n\n def test_stat_operators_attempt_obj_array(self):\n data = {\n 'a': [-0.00049987540199591344, -0.0016467257772919831,\n 0.00067695870775883013],\n 'b': [-0, -0, 0.0],\n 'c': [0.00031111847529610595, 0.0014902627951905339,\n -0.00094099200035979691]\n }\n df1 = DataFrame(data, index=['foo', 'bar', 'baz'],\n dtype='O')\n methods = ['sum', 'mean', 'prod', 'var', 'std', 'skew', 'min', 'max']\n\n # GH #676\n df2 = DataFrame({0: [np.nan, 2], 1: [np.nan, 3],\n 2: [np.nan, 4]}, dtype=object)\n\n for df in [df1, df2]:\n for meth in methods:\n self.assertEqual(df.values.dtype, np.object_)\n result = getattr(df, meth)(1)\n expected = getattr(df.astype('f8'), meth)(1)\n assert_series_equal(result, expected)\n\n def test_mean(self):\n self._check_stat_op('mean', np.mean, check_dates=True)\n\n def test_product(self):\n self._check_stat_op('product', np.prod)\n\n def test_median(self):\n def wrapper(x):\n if isnull(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, check_dates=True)\n\n def test_min(self):\n self._check_stat_op('min', np.min, check_dates=True)\n self._check_stat_op('min', np.min, frame=self.intframe)\n\n def test_cummin(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cummin = self.tsframe.cummin()\n expected = self.tsframe.apply(Series.cummin)\n assert_frame_equal(cummin, expected)\n\n # axis = 1\n cummin = self.tsframe.cummin(axis=1)\n expected = self.tsframe.apply(Series.cummin, axis=1)\n assert_frame_equal(cummin, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummin()\n\n # fix issue\n cummin_xs = self.tsframe.cummin(axis=1)\n self.assertEqual(np.shape(cummin_xs), np.shape(self.tsframe))\n\n def test_cummax(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cummax = self.tsframe.cummax()\n expected = self.tsframe.apply(Series.cummax)\n assert_frame_equal(cummax, expected)\n\n # axis = 1\n cummax = self.tsframe.cummax(axis=1)\n expected = self.tsframe.apply(Series.cummax, axis=1)\n assert_frame_equal(cummax, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cummax()\n\n # fix issue\n cummax_xs = self.tsframe.cummax(axis=1)\n self.assertEqual(np.shape(cummax_xs), np.shape(self.tsframe))\n\n def test_max(self):\n self._check_stat_op('max', np.max, check_dates=True)\n 
self._check_stat_op('max', np.max, frame=self.intframe)\n\n def test_mad(self):\n f = lambda x: np.abs(x - x.mean()).mean()\n self._check_stat_op('mad', f)\n\n def test_var_std(self):\n alt = lambda x: np.var(x, ddof=1)\n self._check_stat_op('var', alt)\n\n alt = lambda x: np.std(x, ddof=1)\n self._check_stat_op('std', alt)\n\n result = self.tsframe.std(ddof=4)\n expected = self.tsframe.apply(lambda x: x.std(ddof=4))\n assert_almost_equal(result, expected)\n\n result = self.tsframe.var(ddof=4)\n expected = self.tsframe.apply(lambda x: x.var(ddof=4))\n assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nanvar(arr, axis=0)\n self.assertFalse((result < 0).any())\n if nanops._USE_BOTTLENECK:\n nanops._USE_BOTTLENECK = False\n result = nanops.nanvar(arr, axis=0)\n self.assertFalse((result < 0).any())\n nanops._USE_BOTTLENECK = True\n\n def test_sem(self):\n alt = lambda x: np.std(x, ddof=1)/np.sqrt(len(x))\n self._check_stat_op('sem', alt)\n\n result = self.tsframe.sem(ddof=4)\n expected = self.tsframe.apply(lambda x: x.std(ddof=4)/np.sqrt(len(x)))\n assert_almost_equal(result, expected)\n\n arr = np.repeat(np.random.random((1, 1000)), 1000, 0)\n result = nanops.nansem(arr, axis=0)\n self.assertFalse((result < 0).any())\n if nanops._USE_BOTTLENECK:\n nanops._USE_BOTTLENECK = False\n result = nanops.nansem(arr, axis=0)\n self.assertFalse((result < 0).any())\n nanops._USE_BOTTLENECK = True\n\n def test_skew(self):\n tm._skip_if_no_scipy()\n from scipy.stats import skew\n\n def alt(x):\n if len(x) < 3:\n return np.nan\n return skew(x, bias=False)\n\n self._check_stat_op('skew', alt)\n\n def test_kurt(self):\n tm._skip_if_no_scipy()\n\n from scipy.stats import kurtosis\n\n def alt(x):\n if len(x) < 4:\n return np.nan\n return kurtosis(x, bias=False)\n\n self._check_stat_op('kurt', alt)\n\n index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]],\n labels=[[0, 0, 0, 0, 0, 0],\n [0, 1, 2, 0, 1, 2],\n [0, 1, 0, 1, 0, 1]])\n df = DataFrame(np.random.randn(6, 3), index=index)\n assert_series_equal(df.kurt(), df.kurt(level=0).xs('bar'))\n\n def _check_stat_op(self, name, alternative, frame=None, has_skipna=True,\n has_numeric_only=False, check_dtype=True, check_dates=False,\n check_less_precise=False):\n if frame is None:\n frame = self.frame\n # set some NAs\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if check_dates:\n df = DataFrame({'b': date_range('1/1/2001', periods=2)})\n _f = getattr(df, name)\n result = _f()\n self.assertIsInstance(result, Series)\n\n df['a'] = lrange(len(df))\n result = getattr(df, name)()\n self.assertIsInstance(result, Series)\n self.assertTrue(len(result))\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = x.dropna()\n if len(nona) == 0:\n return np.nan\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n assert_series_equal(result0, frame.apply(wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False,\n check_less_precise=check_less_precise) # HACK: win32\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n assert_series_equal(result0, frame.apply(skipna_wrapper),\n check_dtype=check_dtype,\n check_less_precise=check_less_precise)\n assert_series_equal(result1, frame.apply(skipna_wrapper, 
axis=1),\n check_dtype=False,\n check_less_precise=check_less_precise)\n\n # check dtypes\n if check_dtype:\n lcd_dtype = frame.values.dtype\n self.assertEqual(lcd_dtype, result0.dtype)\n self.assertEqual(lcd_dtype, result1.dtype)\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n assertRaisesRegexp(ValueError, 'No axis named 2', f, axis=2)\n # make sure works on mixed-type frame\n getattr(self.mixed_frame, name)(axis=0)\n getattr(self.mixed_frame, name)(axis=1)\n\n if has_numeric_only:\n getattr(self.mixed_frame, name)(axis=0, numeric_only=True)\n getattr(self.mixed_frame, name)(axis=1, numeric_only=True)\n getattr(self.frame, name)(axis=0, numeric_only=False)\n getattr(self.frame, name)(axis=1, numeric_only=False)\n\n # all NA case\n if has_skipna:\n all_na = self.frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n self.assertTrue(np.isnan(r0).all())\n self.assertTrue(np.isnan(r1).all())\n\n def test_mode(self):\n df = pd.DataFrame({\"A\": [12, 12, 11, 12, 19, 11],\n \"B\": [10, 10, 10, np.nan, 3, 4],\n \"C\": [8, 8, 8, 9, 9, 9],\n \"D\": range(6),\n \"E\": [8, 8, 1, 1, 3, 3]})\n assert_frame_equal(df[[\"A\"]].mode(),\n pd.DataFrame({\"A\": [12]}))\n assert_frame_equal(df[[\"D\"]].mode(),\n pd.DataFrame(pd.Series([], dtype=\"int64\"),\n columns=[\"D\"]))\n assert_frame_equal(df[[\"E\"]].mode(),\n pd.DataFrame(pd.Series([1, 3, 8], dtype=\"int64\"),\n columns=[\"E\"]))\n assert_frame_equal(df[[\"A\", \"B\"]].mode(),\n pd.DataFrame({\"A\": [12], \"B\": [10.]}))\n assert_frame_equal(df.mode(),\n pd.DataFrame({\"A\": [12, np.nan, np.nan],\n \"B\": [10, np.nan, np.nan],\n \"C\": [8, 9, np.nan],\n \"D\": [np.nan, np.nan, np.nan],\n \"E\": [1, 3, 8]}))\n\n # outputs in sorted order\n df[\"C\"] = list(reversed(df[\"C\"]))\n com.pprint_thing(df[\"C\"])\n com.pprint_thing(df[\"C\"].mode())\n a, b = (df[[\"A\", \"B\", \"C\"]].mode(),\n pd.DataFrame({\"A\": [12, np.nan],\n \"B\": [10, np.nan],\n \"C\": [8, 9]}))\n com.pprint_thing(a)\n com.pprint_thing(b)\n assert_frame_equal(a, b)\n # should work with heterogeneous types\n df = pd.DataFrame({\"A\": range(6),\n \"B\": pd.date_range('2011', periods=6),\n \"C\": list('abcdef')})\n exp = pd.DataFrame({\"A\": pd.Series([], dtype=df[\"A\"].dtype),\n \"B\": pd.Series([], dtype=df[\"B\"].dtype),\n \"C\": pd.Series([], dtype=df[\"C\"].dtype)})\n assert_frame_equal(df.mode(), exp)\n\n # and also when not empty\n df.loc[1, \"A\"] = 0\n df.loc[4, \"B\"] = df.loc[3, \"B\"]\n df.loc[5, \"C\"] = 'e'\n exp = pd.DataFrame({\"A\": pd.Series([0], dtype=df[\"A\"].dtype),\n \"B\": pd.Series([df.loc[3, \"B\"]], dtype=df[\"B\"].dtype),\n \"C\": pd.Series(['e'], dtype=df[\"C\"].dtype)})\n\n assert_frame_equal(df.mode(), exp)\n\n def test_sum_corner(self):\n axis0 = self.empty.sum(0)\n axis1 = self.empty.sum(1)\n tm.assert_isinstance(axis0, Series)\n tm.assert_isinstance(axis1, Series)\n self.assertEqual(len(axis0), 0)\n self.assertEqual(len(axis1), 0)\n\n def test_sum_object(self):\n values = self.frame.values.astype(int)\n frame = DataFrame(values, index=self.frame.index,\n columns=self.frame.columns)\n deltas = frame * timedelta(1)\n deltas.sum()\n\n def test_sum_bool(self):\n # ensure this works, bug report\n bools = np.isnan(self.frame)\n bools.sum(1)\n bools.sum(0)\n\n def test_mean_corner(self):\n # unit test when have object data\n the_mean = self.mixed_frame.mean(axis=0)\n the_sum = self.mixed_frame.sum(axis=0, numeric_only=True)\n 
self.assertTrue(the_sum.index.equals(the_mean.index))\n self.assertTrue(len(the_mean.index) < len(self.mixed_frame.columns))\n\n # xs sum mixed type, just want to know it works...\n the_mean = self.mixed_frame.mean(axis=1)\n the_sum = self.mixed_frame.sum(axis=1, numeric_only=True)\n self.assertTrue(the_sum.index.equals(the_mean.index))\n\n # take mean of boolean column\n self.frame['bool'] = self.frame['A'] > 0\n means = self.frame.mean(0)\n self.assertEqual(means['bool'], self.frame['bool'].values.mean())\n\n def test_stats_mixed_type(self):\n # don't blow up\n self.mixed_frame.std(1)\n self.mixed_frame.var(1)\n self.mixed_frame.mean(1)\n self.mixed_frame.skew(1)\n\n def test_median_corner(self):\n def wrapper(x):\n if isnull(x).any():\n return np.nan\n return np.median(x)\n\n self._check_stat_op('median', wrapper, frame=self.intframe,\n check_dtype=False, check_dates=True)\n\n def test_quantile(self):\n from numpy import percentile\n\n q = self.tsframe.quantile(0.1, axis=0)\n self.assertEqual(q['A'], percentile(self.tsframe['A'], 10))\n q = self.tsframe.quantile(0.9, axis=1)\n q = self.intframe.quantile(0.1)\n self.assertEqual(q['A'], percentile(self.intframe['A'], 10))\n\n # test degenerate case\n q = DataFrame({'x': [], 'y': []}).quantile(0.1, axis=0)\n assert(np.isnan(q['x']) and np.isnan(q['y']))\n\n # non-numeric exclusion\n df = DataFrame({'col1':['A','A','B','B'], 'col2':[1,2,3,4]})\n rs = df.quantile(0.5)\n xp = df.median()\n assert_series_equal(rs, xp)\n\n # axis\n df = DataFrame({\"A\": [1, 2, 3], \"B\": [2, 3, 4]}, index=[1, 2, 3])\n result = df.quantile(.5, axis=1)\n expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3])\n assert_series_equal(result, expected)\n\n result = df.quantile([.5, .75], axis=1)\n expected = DataFrame({1: [1.5, 1.75], 2: [2.5, 2.75],\n 3: [3.5, 3.75]}, index=[\"0.5\", \"0.75\"])\n assert_frame_equal(result, expected)\n\n # We may want to break API in the future to change this\n # so that we exclude non-numeric along the same axis\n # See GH #7312\n df = DataFrame([[1, 2, 3],\n ['a', 'b', 4]])\n result = df.quantile(.5, axis=1)\n expected = Series([3., 4.], index=[0, 1])\n assert_series_equal(result, expected)\n\n def test_quantile_multi(self):\n df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]],\n columns=['a', 'b', 'c'])\n result = df.quantile([.25, .5])\n expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],\n index=[.25, .5], columns=['a', 'b', 'c'])\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.quantile([.25, .5], axis=1)\n expected = DataFrame([[1.5, 1.5, 1.5], [2., 2., 2.]],\n index=[.25, .5], columns=[0, 1, 2])\n\n # empty\n result = DataFrame({'x': [], 'y': []}).quantile([0.1, .9], axis=0)\n expected = DataFrame({'x': [np.nan, np.nan], 'y': [np.nan, np.nan]},\n index=[.1, .9])\n assert_frame_equal(result, expected)\n\n def test_quantile_datetime(self):\n df = DataFrame({'a': pd.to_datetime(['2010', '2011']), 'b': [0, 5]})\n\n # exclude datetime\n result = df.quantile(.5)\n expected = Series([2.5], index=['b'])\n\n # datetime\n result = df.quantile(.5, numeric_only=False)\n expected = Series([Timestamp('2010-07-02 12:00:00'), 2.5],\n index=['a', 'b'])\n assert_series_equal(result, expected)\n\n # datetime w/ multi\n result = df.quantile([.5], numeric_only=False)\n expected = DataFrame([[Timestamp('2010-07-02 12:00:00'), 2.5]],\n index=[.5], columns=['a', 'b'])\n assert_frame_equal(result, expected)\n\n # axis = 1\n df['c'] = pd.to_datetime(['2011', '2012'])\n result = df[['a', 'c']].quantile(.5, axis=1, numeric_only=False)\n 
expected = Series([Timestamp('2010-07-02 12:00:00'),\n Timestamp('2011-07-02 12:00:00')],\n index=[0, 1])\n assert_series_equal(result, expected)\n\n result = df[['a', 'c']].quantile([.5], axis=1, numeric_only=False)\n expected = DataFrame([[Timestamp('2010-07-02 12:00:00'),\n Timestamp('2011-07-02 12:00:00')]],\n index=[0.5], columns=[0, 1])\n assert_frame_equal(result, expected)\n\n def test_cumsum(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cumsum = self.tsframe.cumsum()\n expected = self.tsframe.apply(Series.cumsum)\n assert_frame_equal(cumsum, expected)\n\n # axis = 1\n cumsum = self.tsframe.cumsum(axis=1)\n expected = self.tsframe.apply(Series.cumsum, axis=1)\n assert_frame_equal(cumsum, expected)\n\n # works\n df = DataFrame({'A': np.arange(20)}, index=np.arange(20))\n result = df.cumsum()\n\n # fix issue\n cumsum_xs = self.tsframe.cumsum(axis=1)\n self.assertEqual(np.shape(cumsum_xs), np.shape(self.tsframe))\n\n def test_cumprod(self):\n self.tsframe.ix[5:10, 0] = nan\n self.tsframe.ix[10:15, 1] = nan\n self.tsframe.ix[15:, 2] = nan\n\n # axis = 0\n cumprod = self.tsframe.cumprod()\n expected = self.tsframe.apply(Series.cumprod)\n assert_frame_equal(cumprod, expected)\n\n # axis = 1\n cumprod = self.tsframe.cumprod(axis=1)\n expected = self.tsframe.apply(Series.cumprod, axis=1)\n assert_frame_equal(cumprod, expected)\n\n # fix issue\n cumprod_xs = self.tsframe.cumprod(axis=1)\n self.assertEqual(np.shape(cumprod_xs), np.shape(self.tsframe))\n\n # ints\n df = self.tsframe.fillna(0).astype(int)\n df.cumprod(0)\n df.cumprod(1)\n\n # ints32\n df = self.tsframe.fillna(0).astype(np.int32)\n df.cumprod(0)\n df.cumprod(1)\n\n def test_rank(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.frame['A'][::2] = np.nan\n self.frame['B'][::3] = np.nan\n self.frame['C'][::4] = np.nan\n self.frame['D'][::5] = np.nan\n\n ranks0 = self.frame.rank()\n ranks1 = self.frame.rank(1)\n mask = np.isnan(self.frame.values)\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fvals)\n exp0[mask] = np.nan\n\n exp1 = np.apply_along_axis(rankdata, 1, fvals)\n exp1[mask] = np.nan\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # integers\n df = DataFrame(np.random.randint(0, 5, size=40).reshape((10, 4)))\n\n result = df.rank()\n exp = df.astype(float).rank()\n assert_frame_equal(result, exp)\n\n result = df.rank(1)\n exp = df.astype(float).rank(1)\n assert_frame_equal(result, exp)\n\n def test_rank2(self):\n from datetime import datetime\n df = DataFrame([[1, 3, 2], [1, 2, 3]])\n expected = DataFrame([[1.0, 3.0, 2.0], [1, 2, 3]]) / 3.0\n result = df.rank(1, pct=True)\n assert_frame_equal(result, expected)\n\n df = DataFrame([[1, 3, 2], [1, 2, 3]])\n expected = df.rank(0) / 2.0\n result = df.rank(0, pct=True)\n assert_frame_equal(result, expected)\n\n\n\n df = DataFrame([['b', 'c', 'a'], ['a', 'c', 'b']])\n expected = DataFrame([[2.0, 3.0, 1.0], [1, 3, 2]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n\n expected = DataFrame([[2.0, 1.5, 1.0], [1, 1.5, 2]])\n result = df.rank(0, numeric_only=False)\n assert_frame_equal(result, expected)\n\n df = DataFrame([['b', np.nan, 'a'], ['a', 'c', 'b']])\n expected = DataFrame([[2.0, nan, 1.0], [1.0, 3.0, 2.0]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n expected = DataFrame([[2.0, nan, 1.0], [1.0, 1.0, 2.0]])\n 
result = df.rank(0, numeric_only=False)\n assert_frame_equal(result, expected)\n\n # f7u12, this does not work without extensive workaround\n data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],\n [datetime(2000, 1, 2), datetime(2000, 1, 3),\n datetime(2000, 1, 1)]]\n df = DataFrame(data)\n\n # check the rank\n expected = DataFrame([[2., nan, 1.],\n [2., 3., 1.]])\n result = df.rank(1, numeric_only=False)\n assert_frame_equal(result, expected)\n\n # mixed-type frames\n self.mixed_frame['datetime'] = datetime.now()\n self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)\n\n result = self.mixed_frame.rank(1)\n expected = self.mixed_frame.rank(1, numeric_only=True)\n assert_frame_equal(result, expected)\n\n df = DataFrame({\"a\":[1e-20, -5, 1e-20+1e-40, 10, 1e60, 1e80, 1e-30]})\n exp = DataFrame({\"a\":[ 3.5, 1. , 3.5, 5. , 6. , 7. , 2. ]})\n assert_frame_equal(df.rank(), exp)\n\n def test_rank_na_option(self):\n tm._skip_if_no_scipy()\n from scipy.stats import rankdata\n\n self.frame['A'][::2] = np.nan\n self.frame['B'][::3] = np.nan\n self.frame['C'][::4] = np.nan\n self.frame['D'][::5] = np.nan\n\n # bottom\n ranks0 = self.frame.rank(na_option='bottom')\n ranks1 = self.frame.rank(1, na_option='bottom')\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fvals)\n exp1 = np.apply_along_axis(rankdata, 1, fvals)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # top\n ranks0 = self.frame.rank(na_option='top')\n ranks1 = self.frame.rank(1, na_option='top')\n\n fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values\n fval1 = self.frame.T\n fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T\n fval1 = fval1.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, fval0)\n exp1 = np.apply_along_axis(rankdata, 1, fval1)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # descending\n\n # bottom\n ranks0 = self.frame.rank(na_option='top', ascending=False)\n ranks1 = self.frame.rank(1, na_option='top', ascending=False)\n\n fvals = self.frame.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, -fvals)\n exp1 = np.apply_along_axis(rankdata, 1, -fvals)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n # descending\n\n # top\n ranks0 = self.frame.rank(na_option='bottom', ascending=False)\n ranks1 = self.frame.rank(1, na_option='bottom', ascending=False)\n\n fval0 = self.frame.fillna((self.frame.min() - 1).to_dict()).values\n fval1 = self.frame.T\n fval1 = fval1.fillna((fval1.min() - 1).to_dict()).T\n fval1 = fval1.fillna(np.inf).values\n\n exp0 = np.apply_along_axis(rankdata, 0, -fval0)\n exp1 = np.apply_along_axis(rankdata, 1, -fval1)\n\n assert_almost_equal(ranks0.values, exp0)\n assert_almost_equal(ranks1.values, exp1)\n\n def test_axis_aliases(self):\n\n f = self.frame\n\n # reg name\n expected = f.sum(axis=0)\n result = f.sum(axis='index')\n assert_series_equal(result, expected)\n\n expected = f.sum(axis=1)\n result = f.sum(axis='columns')\n assert_series_equal(result, expected)\n\n def test_combine_first_mixed(self):\n a = Series(['a', 'b'], index=lrange(2))\n b = Series(lrange(2), index=lrange(2))\n f = DataFrame({'A': a, 'B': b})\n\n a = Series(['a', 'b'], index=lrange(5, 7))\n b = Series(lrange(2), index=lrange(5, 7))\n g = DataFrame({'A': a, 'B': b})\n\n combined = f.combine_first(g)\n\n def test_more_asMatrix(self):\n values = self.mixed_frame.as_matrix()\n 
self.assertEqual(values.shape[1], len(self.mixed_frame.columns))\n\n def test_reindex_boolean(self):\n frame = DataFrame(np.ones((10, 2), dtype=bool),\n index=np.arange(0, 20, 2),\n columns=[0, 2])\n\n reindexed = frame.reindex(np.arange(10))\n self.assertEqual(reindexed.values.dtype, np.object_)\n self.assertTrue(isnull(reindexed[0][1]))\n\n reindexed = frame.reindex(columns=lrange(3))\n self.assertEqual(reindexed.values.dtype, np.object_)\n self.assertTrue(isnull(reindexed[1]).all())\n\n def test_reindex_objects(self):\n reindexed = self.mixed_frame.reindex(columns=['foo', 'A', 'B'])\n self.assertIn('foo', reindexed)\n\n reindexed = self.mixed_frame.reindex(columns=['A', 'B'])\n self.assertNotIn('foo', reindexed)\n\n def test_reindex_corner(self):\n index = Index(['a', 'b', 'c'])\n dm = self.empty.reindex(index=[1, 2, 3])\n reindexed = dm.reindex(columns=index)\n self.assertTrue(reindexed.columns.equals(index))\n\n # ints are weird\n\n smaller = self.intframe.reindex(columns=['A', 'B', 'E'])\n self.assertEqual(smaller['E'].dtype, np.float64)\n\n def test_reindex_axis(self):\n cols = ['A', 'B', 'E']\n reindexed1 = self.intframe.reindex_axis(cols, axis=1)\n reindexed2 = self.intframe.reindex(columns=cols)\n assert_frame_equal(reindexed1, reindexed2)\n\n rows = self.intframe.index[0:5]\n reindexed1 = self.intframe.reindex_axis(rows, axis=0)\n reindexed2 = self.intframe.reindex(index=rows)\n assert_frame_equal(reindexed1, reindexed2)\n\n self.assertRaises(ValueError, self.intframe.reindex_axis, rows, axis=2)\n\n # no-op case\n cols = self.frame.columns.copy()\n newFrame = self.frame.reindex_axis(cols, axis=1)\n assert_frame_equal(newFrame, self.frame)\n\n def test_reindex_with_nans(self):\n df = DataFrame([[1, 2], [3, 4], [np.nan, np.nan], [7, 8], [9, 10]],\n columns=['a', 'b'],\n index=[100.0, 101.0, np.nan, 102.0, 103.0])\n\n result = df.reindex(index=[101.0, 102.0, 103.0])\n expected = df.iloc[[1, 3, 4]]\n assert_frame_equal(result, expected)\n\n result = df.reindex(index=[103.0])\n expected = df.iloc[[4]]\n assert_frame_equal(result, expected)\n\n result = df.reindex(index=[101.0])\n expected = df.iloc[[1]]\n assert_frame_equal(result, expected)\n\n def test_reindex_multi(self):\n df = DataFrame(np.random.randn(3, 3))\n\n result = df.reindex(lrange(4), lrange(4))\n expected = df.reindex(lrange(4)).reindex(columns=lrange(4))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randint(0, 10, (3, 3)))\n\n result = df.reindex(lrange(4), lrange(4))\n expected = df.reindex(lrange(4)).reindex(columns=lrange(4))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randint(0, 10, (3, 3)))\n\n result = df.reindex(lrange(2), lrange(2))\n expected = df.reindex(lrange(2)).reindex(columns=lrange(2))\n\n assert_frame_equal(result, expected)\n\n df = DataFrame(np.random.randn(5, 3) + 1j, columns=['a', 'b', 'c'])\n\n result = df.reindex(index=[0, 1], columns=['a', 'b'])\n expected = df.reindex([0, 1]).reindex(columns=['a', 'b'])\n\n assert_frame_equal(result, expected)\n\n def test_rename_objects(self):\n renamed = self.mixed_frame.rename(columns=str.upper)\n self.assertIn('FOO', renamed)\n self.assertNotIn('foo', renamed)\n\n def test_fill_corner(self):\n self.mixed_frame['foo'][5:20] = nan\n self.mixed_frame['A'][-10:] = nan\n\n filled = self.mixed_frame.fillna(value=0)\n self.assertTrue((filled['foo'][5:20] == 0).all())\n del self.mixed_frame['foo']\n\n empty_float = self.frame.reindex(columns=[])\n result = empty_float.fillna(value=0)\n\n def 
test_count_objects(self):\n dm = DataFrame(self.mixed_frame._series)\n df = DataFrame(self.mixed_frame._series)\n\n tm.assert_series_equal(dm.count(), df.count())\n tm.assert_series_equal(dm.count(1), df.count(1))\n\n def test_cumsum_corner(self):\n dm = DataFrame(np.arange(20).reshape(4, 5),\n index=lrange(4), columns=lrange(5))\n result = dm.cumsum()\n\n #----------------------------------------------------------------------\n # Stacking / unstacking\n\n def test_stack_unstack(self):\n stacked = self.frame.stack()\n stacked_df = DataFrame({'foo': stacked, 'bar': stacked})\n\n unstacked = stacked.unstack()\n unstacked_df = stacked_df.unstack()\n\n assert_frame_equal(unstacked, self.frame)\n assert_frame_equal(unstacked_df['bar'], self.frame)\n\n unstacked_cols = stacked.unstack(0)\n unstacked_cols_df = stacked_df.unstack(0)\n assert_frame_equal(unstacked_cols.T, self.frame)\n assert_frame_equal(unstacked_cols_df['bar'].T, self.frame)\n\n def test_unstack_bool(self):\n df = DataFrame([False, False],\n index=MultiIndex.from_arrays([['a', 'b'], ['c', 'l']]),\n columns=['col'])\n rs = df.unstack()\n xp = DataFrame(np.array([[False, np.nan], [np.nan, False]],\n dtype=object),\n index=['a', 'b'],\n columns=MultiIndex.from_arrays([['col', 'col'],\n ['c', 'l']]))\n assert_frame_equal(rs, xp)\n\n def test_unstack_to_series(self):\n # check reversibility\n data = self.frame.unstack()\n\n self.assertTrue(isinstance(data, Series))\n undo = data.unstack().T\n assert_frame_equal(undo, self.frame)\n\n # check NA handling\n data = DataFrame({'x': [1, 2, np.NaN], 'y': [3.0, 4, np.NaN]})\n data.index = Index(['a', 'b', 'c'])\n result = data.unstack()\n\n midx = MultiIndex(levels=[['x', 'y'], ['a', 'b', 'c']],\n labels=[[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])\n expected = Series([1, 2, np.NaN, 3, 4, np.NaN], index=midx)\n\n assert_series_equal(result, expected)\n\n # check composability of unstack\n old_data = data.copy()\n for _ in range(4):\n data = data.unstack()\n assert_frame_equal(old_data, data)\n\n def test_unstack_dtypes(self):\n\n # GH 2929\n rows = [[1, 1, 3, 4],\n [1, 2, 3, 4],\n [2, 1, 3, 4],\n [2, 2, 3, 4]]\n\n df = DataFrame(rows, columns=list('ABCD'))\n result = df.get_dtype_counts()\n expected = Series({'int64' : 4})\n assert_series_equal(result, expected)\n\n # single dtype\n df2 = df.set_index(['A','B'])\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'int64' : 4})\n assert_series_equal(result, expected)\n\n # mixed\n df2 = df.set_index(['A','B'])\n df2['C'] = 3.\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'int64' : 2, 'float64' : 2})\n assert_series_equal(result, expected)\n\n df2['D'] = 'foo'\n df3 = df2.unstack('B')\n result = df3.get_dtype_counts()\n expected = Series({'float64' : 2, 'object' : 2})\n assert_series_equal(result, expected)\n\n def test_unstack_non_unique_index_names(self):\n idx = MultiIndex.from_tuples([('a', 'b'), ('c', 'd')],\n names=['c1', 'c1'])\n df = DataFrame([1, 2], index=idx)\n with tm.assertRaises(ValueError):\n df.unstack('c1')\n\n with tm.assertRaises(ValueError):\n df.T.stack('c1')\n\n def test_repr_with_mi_nat(self):\n df = DataFrame({'X': [1, 2]},\n index=[[pd.NaT, pd.Timestamp('20130101')], ['a', 'b']])\n res = repr(df)\n exp = ' X\\nNaT a 1\\n2013-01-01 b 2'\n nose.tools.assert_equal(res, exp)\n\n def test_reset_index(self):\n stacked = self.frame.stack()[::2]\n stacked = DataFrame({'foo': stacked, 'bar': stacked})\n\n names = ['first', 'second']\n stacked.index.names = names\n 
deleveled = stacked.reset_index()\n for i, (lev, lab) in enumerate(zip(stacked.index.levels,\n stacked.index.labels)):\n values = lev.take(lab)\n name = names[i]\n assert_almost_equal(values, deleveled[name])\n\n stacked.index.names = [None, None]\n deleveled2 = stacked.reset_index()\n self.assert_numpy_array_equal(deleveled['first'],\n deleveled2['level_0'])\n self.assert_numpy_array_equal(deleveled['second'],\n deleveled2['level_1'])\n\n # default name assigned\n rdf = self.frame.reset_index()\n self.assert_numpy_array_equal(rdf['index'], self.frame.index.values)\n\n # default name assigned, corner case\n df = self.frame.copy()\n df['index'] = 'foo'\n rdf = df.reset_index()\n self.assert_numpy_array_equal(rdf['level_0'], self.frame.index.values)\n\n # but this is ok\n self.frame.index.name = 'index'\n deleveled = self.frame.reset_index()\n self.assert_numpy_array_equal(deleveled['index'],\n self.frame.index.values)\n self.assert_numpy_array_equal(deleveled.index,\n np.arange(len(deleveled)))\n\n # preserve column names\n self.frame.columns.name = 'columns'\n resetted = self.frame.reset_index()\n self.assertEqual(resetted.columns.name, 'columns')\n\n # only remove certain columns\n frame = self.frame.reset_index().set_index(['index', 'A', 'B'])\n rs = frame.reset_index(['A', 'B'])\n\n assert_frame_equal(rs, self.frame, check_names=False) # TODO should reset_index check_names ?\n\n rs = frame.reset_index(['index', 'A', 'B'])\n assert_frame_equal(rs, self.frame.reset_index(), check_names=False)\n\n rs = frame.reset_index(['index', 'A', 'B'])\n assert_frame_equal(rs, self.frame.reset_index(), check_names=False)\n\n rs = frame.reset_index('A')\n xp = self.frame.reset_index().set_index(['index', 'B'])\n assert_frame_equal(rs, xp, check_names=False)\n\n # test resetting in place\n df = self.frame.copy()\n resetted = self.frame.reset_index()\n df.reset_index(inplace=True)\n assert_frame_equal(df, resetted, check_names=False)\n\n frame = self.frame.reset_index().set_index(['index', 'A', 'B'])\n rs = frame.reset_index('A', drop=True)\n xp = self.frame.copy()\n del xp['A']\n xp = xp.set_index(['B'], append=True)\n assert_frame_equal(rs, xp, check_names=False)\n\n def test_reset_index_right_dtype(self):\n time = np.arange(0.0, 10, np.sqrt(2) / 2)\n s1 = Series((9.81 * time ** 2) / 2,\n index=Index(time, name='time'),\n name='speed')\n df = DataFrame(s1)\n\n resetted = s1.reset_index()\n self.assertEqual(resetted['time'].dtype, np.float64)\n\n resetted = df.reset_index()\n self.assertEqual(resetted['time'].dtype, np.float64)\n\n def test_reset_index_multiindex_col(self):\n vals = np.random.randn(3, 3).astype(object)\n idx = ['x', 'y', 'z']\n full = np.hstack(([[x] for x in idx], vals))\n df = DataFrame(vals, Index(idx, name='a'),\n columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])\n rs = df.reset_index()\n xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],\n ['', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index(col_fill=None)\n xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index(col_level=1, col_fill='blah')\n xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n df = DataFrame(vals,\n MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']],\n names=['d', 'a']),\n columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']])\n rs = df.reset_index('a', )\n xp = DataFrame(full, Index([0, 1, 2], name='d'),\n 
columns=[['a', 'b', 'b', 'c'],\n ['', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index('a', col_fill=None)\n xp = DataFrame(full, Index(lrange(3), name='d'),\n columns=[['a', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n rs = df.reset_index('a', col_fill='blah', col_level=1)\n xp = DataFrame(full, Index(lrange(3), name='d'),\n columns=[['blah', 'b', 'b', 'c'],\n ['a', 'mean', 'median', 'mean']])\n assert_frame_equal(rs, xp)\n\n def test_reset_index_with_datetimeindex_cols(self):\n # GH5818\n #\n df = pd.DataFrame([[1, 2], [3, 4]],\n columns=pd.date_range('1/1/2013', '1/2/2013'),\n index=['A', 'B'])\n\n result = df.reset_index()\n expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]],\n columns=['index', datetime(2013, 1, 1),\n datetime(2013, 1, 2)])\n assert_frame_equal(result, expected)\n\n #----------------------------------------------------------------------\n # Tests to cope with refactored internals\n def test_as_matrix_numeric_cols(self):\n self.frame['foo'] = 'bar'\n\n values = self.frame.as_matrix(['A', 'B', 'C', 'D'])\n self.assertEqual(values.dtype, np.float64)\n\n def test_as_matrix_lcd(self):\n\n # mixed lcd\n values = self.mixed_float.as_matrix(['A', 'B', 'C', 'D'])\n self.assertEqual(values.dtype, np.float64)\n\n values = self.mixed_float.as_matrix(['A', 'B', 'C' ])\n self.assertEqual(values.dtype, np.float32)\n\n values = self.mixed_float.as_matrix(['C'])\n self.assertEqual(values.dtype, np.float16)\n\n values = self.mixed_int.as_matrix(['A','B','C','D'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A','D'])\n self.assertEqual(values.dtype, np.int64)\n\n # guess all ints are cast to uints....\n values = self.mixed_int.as_matrix(['A','B','C'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A','C'])\n self.assertEqual(values.dtype, np.int32)\n\n values = self.mixed_int.as_matrix(['C','D'])\n self.assertEqual(values.dtype, np.int64)\n\n values = self.mixed_int.as_matrix(['A'])\n self.assertEqual(values.dtype, np.int32)\n\n values = self.mixed_int.as_matrix(['C'])\n self.assertEqual(values.dtype, np.uint8)\n\n def test_constructor_with_convert(self):\n # this is actually mostly a test of lib.maybe_convert_objects\n # #2845\n df = DataFrame({'A' : [2**63-1] })\n result = df['A']\n expected = Series(np.asarray([2**63-1], np.int64))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2**63] })\n result = df['A']\n expected = Series(np.asarray([2**63], np.object_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [datetime(2005, 1, 1), True] })\n result = df['A']\n expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [None, 1] })\n result = df['A']\n expected = Series(np.asarray([np.nan, 1], np.float_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0, 2] })\n result = df['A']\n expected = Series(np.asarray([1.0, 2], np.float_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, 3] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, 3], np.complex_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, 3.0] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, 3.0], np.complex_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, True] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, True], 
np.object_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0, None] })\n result = df['A']\n expected = Series(np.asarray([1.0, np.nan], np.float_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [1.0+2.0j, None] })\n result = df['A']\n expected = Series(np.asarray([1.0+2.0j, np.nan], np.complex_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2.0, 1, True, None] })\n result = df['A']\n expected = Series(np.asarray([2.0, 1, True, None], np.object_))\n assert_series_equal(result, expected)\n\n df = DataFrame({'A' : [2.0, 1, datetime(2006, 1, 1), None] })\n result = df['A']\n expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),\n None], np.object_))\n assert_series_equal(result, expected)\n\n def test_construction_with_mixed(self):\n # test construction edge cases with mixed types\n\n # f7u12, this does not work without extensive workaround\n data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],\n [datetime(2000, 1, 2), datetime(2000, 1, 3),\n datetime(2000, 1, 1)]]\n df = DataFrame(data)\n\n # check dtypes\n result = df.get_dtype_counts().order()\n expected = Series({ 'datetime64[ns]' : 3 })\n\n # mixed-type frames\n self.mixed_frame['datetime'] = datetime.now()\n self.mixed_frame['timedelta'] = timedelta(days=1,seconds=1)\n self.assertEqual(self.mixed_frame['datetime'].dtype, 'M8[ns]')\n self.assertEqual(self.mixed_frame['timedelta'].dtype, 'm8[ns]')\n result = self.mixed_frame.get_dtype_counts().order()\n expected = Series({ 'float64' : 4,\n 'object' : 1,\n 'datetime64[ns]' : 1,\n 'timedelta64[ns]' : 1}).order()\n assert_series_equal(result,expected)\n\n def test_constructor_frame_copy(self):\n cop = DataFrame(self.frame, copy=True)\n cop['A'] = 5\n self.assertTrue((cop['A'] == 5).all())\n self.assertFalse((self.frame['A'] == 5).all())\n\n def test_constructor_ndarray_copy(self):\n df = DataFrame(self.frame.values)\n\n self.frame.values[5] = 5\n self.assertTrue((df.values[5] == 5).all())\n\n df = DataFrame(self.frame.values, copy=True)\n self.frame.values[6] = 6\n self.assertFalse((df.values[6] == 6).all())\n\n def test_constructor_series_copy(self):\n series = self.frame._series\n\n df = DataFrame({'A': series['A']})\n df['A'][:] = 5\n\n self.assertFalse((series['A'] == 5).all())\n\n def test_constructor_compound_dtypes(self):\n # GH 5191\n # compound dtypes should raise not-implementederror\n\n def f(dtype):\n return DataFrame(data = list(itertools.repeat((datetime(2001, 1, 1), \"aa\", 20), 9)),\n columns=[\"A\", \"B\", \"C\"], dtype=dtype)\n\n self.assertRaises(NotImplementedError, f, [(\"A\",\"datetime64[h]\"), (\"B\",\"str\"), (\"C\",\"int32\")])\n\n # these work (though results may be unexpected)\n f('int64')\n f('float64')\n f('M8[ns]')\n\n def test_assign_columns(self):\n self.frame['hi'] = 'there'\n\n frame = self.frame.copy()\n frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2']\n assert_series_equal(self.frame['C'], frame['baz'])\n assert_series_equal(self.frame['hi'], frame['foo2'])\n\n def test_columns_with_dups(self):\n\n # GH 3468 related\n\n # basic\n df = DataFrame([[1,2]], columns=['a','a'])\n df.columns = ['a','a.1']\n str(df)\n expected = DataFrame([[1,2]], columns=['a','a.1'])\n assert_frame_equal(df, expected)\n\n df = DataFrame([[1,2,3]], columns=['b','a','a'])\n df.columns = ['b','a','a.1']\n str(df)\n expected = DataFrame([[1,2,3]], columns=['b','a','a.1'])\n assert_frame_equal(df, expected)\n\n # with a dup index\n df = DataFrame([[1,2]], columns=['a','a'])\n df.columns = ['b','b']\n 
str(df)\n expected = DataFrame([[1,2]], columns=['b','b'])\n assert_frame_equal(df, expected)\n\n # multi-dtype\n df = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=['a','a','b','b','d','c','c'])\n df.columns = list('ABCDEFG')\n str(df)\n expected = DataFrame([[1,2,1.,2.,3.,'foo','bar']], columns=list('ABCDEFG'))\n assert_frame_equal(df, expected)\n\n # this is an error because we cannot disambiguate the dup columns\n self.assertRaises(Exception, lambda x: DataFrame([[1,2,'foo','bar']], columns=['a','a','a','a']))\n\n # dups across blocks\n df_float = DataFrame(np.random.randn(10, 3),dtype='float64')\n df_int = DataFrame(np.random.randn(10, 3),dtype='int64')\n df_bool = DataFrame(True,index=df_float.index,columns=df_float.columns)\n df_object = DataFrame('foo',index=df_float.index,columns=df_float.columns)\n df_dt = DataFrame(Timestamp('20010101'),index=df_float.index,columns=df_float.columns)\n df = pd.concat([ df_float, df_int, df_bool, df_object, df_dt ], axis=1)\n\n self.assertEqual(len(df._data._blknos), len(df.columns))\n self.assertEqual(len(df._data._blklocs), len(df.columns))\n\n # testing iget\n for i in range(len(df.columns)):\n df.iloc[:,i]\n\n # dup columns across dtype GH 2079/2194\n vals = [[1, -1, 2.], [2, -2, 3.]]\n rs = DataFrame(vals, columns=['A', 'A', 'B'])\n xp = DataFrame(vals)\n xp.columns = ['A', 'A', 'B']\n assert_frame_equal(rs, xp)\n\n def test_insert_column_bug_4032(self):\n\n # GH4032, inserting a column and renaming causing errors\n df = DataFrame({'b': [1.1, 2.2]})\n df = df.rename(columns={})\n df.insert(0, 'a', [1, 2])\n\n result = df.rename(columns={})\n str(result)\n expected = DataFrame([[1,1.1],[2, 2.2]],columns=['a','b'])\n assert_frame_equal(result,expected)\n df.insert(0, 'c', [1.3, 2.3])\n\n result = df.rename(columns={})\n str(result)\n\n expected = DataFrame([[1.3,1,1.1],[2.3,2, 2.2]],columns=['c','a','b'])\n assert_frame_equal(result,expected)\n\n def test_cast_internals(self):\n casted = DataFrame(self.frame._data, dtype=int)\n expected = DataFrame(self.frame._series, dtype=int)\n assert_frame_equal(casted, expected)\n\n casted = DataFrame(self.frame._data, dtype=np.int32)\n expected = DataFrame(self.frame._series, dtype=np.int32)\n assert_frame_equal(casted, expected)\n\n def test_consolidate(self):\n self.frame['E'] = 7.\n consolidated = self.frame.consolidate()\n self.assertEqual(len(consolidated._data.blocks), 1)\n\n # Ensure copy, do I want this?\n recons = consolidated.consolidate()\n self.assertIsNot(recons, consolidated)\n assert_frame_equal(recons, consolidated)\n\n self.frame['F'] = 8.\n self.assertEqual(len(self.frame._data.blocks), 3)\n self.frame.consolidate(inplace=True)\n self.assertEqual(len(self.frame._data.blocks), 1)\n\n def test_consolidate_inplace(self):\n frame = self.frame.copy()\n\n # triggers in-place consolidation\n for letter in range(ord('A'), ord('Z')):\n self.frame[chr(letter)] = chr(letter)\n\n def test_as_matrix_consolidate(self):\n self.frame['E'] = 7.\n self.assertFalse(self.frame._data.is_consolidated())\n _ = self.frame.as_matrix()\n self.assertTrue(self.frame._data.is_consolidated())\n\n def test_modify_values(self):\n self.frame.values[5] = 5\n self.assertTrue((self.frame.values[5] == 5).all())\n\n # unconsolidated\n self.frame['E'] = 7.\n self.frame.values[6] = 6\n self.assertTrue((self.frame.values[6] == 6).all())\n\n def test_boolean_set_uncons(self):\n self.frame['E'] = 7.\n\n expected = self.frame.values.copy()\n expected[expected > 1] = 2\n\n self.frame[self.frame > 1] = 2\n 
assert_almost_equal(expected, self.frame.values)\n\n def test_xs_view(self):\n \"\"\"\n in 0.14 this will return a view if possible\n a copy otherwise, but this is numpy dependent\n \"\"\"\n\n dm = DataFrame(np.arange(20.).reshape(4, 5),\n index=lrange(4), columns=lrange(5))\n\n dm.xs(2)[:] = 10\n self.assertTrue((dm.xs(2) == 10).all())\n\n def test_boolean_indexing(self):\n idx = lrange(3)\n cols = ['A','B','C']\n df1 = DataFrame(index=idx, columns=cols,\n data=np.array([[0.0, 0.5, 1.0],\n [1.5, 2.0, 2.5],\n [3.0, 3.5, 4.0]],\n dtype=float))\n df2 = DataFrame(index=idx, columns=cols,\n data=np.ones((len(idx), len(cols))))\n\n expected = DataFrame(index=idx, columns=cols,\n data=np.array([[0.0, 0.5, 1.0],\n [1.5, 2.0, -1],\n [-1, -1, -1]], dtype=float))\n\n df1[df1 > 2.0 * df2] = -1\n assert_frame_equal(df1, expected)\n with assertRaisesRegexp(ValueError, 'Item wrong length'):\n df1[df1.index[:-1] > 2] = -1\n\n def test_boolean_indexing_mixed(self):\n df = DataFrame(\n {long(0): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n long(1): {35: np.nan,\n 40: 0.32632316859446198,\n 43: np.nan,\n 49: 0.32632316859446198,\n 50: 0.39114724480578139},\n long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987, 49: np.nan, 50: np.nan},\n long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},\n 'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})\n\n # mixed int/float ok\n df2 = df.copy()\n df2[df2>0.3] = 1\n expected = df.copy()\n expected.loc[40,1] = 1\n expected.loc[49,1] = 1\n expected.loc[50,1] = 1\n expected.loc[35,4] = 1\n assert_frame_equal(df2,expected)\n\n df['foo'] = 'test'\n with tm.assertRaisesRegexp(TypeError, 'boolean setting on mixed-type'):\n df[df > 0.3] = 1\n\n def test_sum_bools(self):\n df = DataFrame(index=lrange(1), columns=lrange(10))\n bools = isnull(df)\n self.assertEqual(bools.sum(axis=1)[0], 10)\n\n def test_fillna_col_reordering(self):\n idx = lrange(20)\n cols = [\"COL.\" + str(i) for i in range(5, 0, -1)]\n data = np.random.rand(20, 5)\n df = DataFrame(index=lrange(20), columns=cols, data=data)\n filled = df.fillna(method='ffill')\n self.assertEqual(df.columns.tolist(), filled.columns.tolist())\n\n def test_take(self):\n\n # homogeneous\n #----------------------------------------\n order = [3, 1, 2, 0]\n for df in [self.frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['D', 'B', 'C', 'A']]\n assert_frame_equal(result, expected, check_names=False)\n\n # neg indicies\n order = [2,1,-1]\n for df in [self.frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['C', 'B', 'D']]\n assert_frame_equal(result, expected, check_names=False)\n\n # illegal indices\n self.assertRaises(IndexError, df.take, [3,1,2,30], axis=0)\n self.assertRaises(IndexError, df.take, [3,1,2,-31], axis=0)\n self.assertRaises(IndexError, df.take, [3,1,2,5], axis=1)\n self.assertRaises(IndexError, df.take, [3,1,2,-5], axis=1)\n\n # mixed-dtype\n #----------------------------------------\n order = [4, 1, 2, 0, 3]\n for df in [self.mixed_frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, 
axis=1)\n expected = df.ix[:, ['foo', 'B', 'C', 'A', 'D']]\n assert_frame_equal(result, expected)\n\n # neg indicies\n order = [4,1,-2]\n for df in [self.mixed_frame]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['foo', 'B', 'D']]\n assert_frame_equal(result, expected)\n\n # by dtype\n order = [1, 2, 0, 3]\n for df in [self.mixed_float,self.mixed_int]:\n\n result = df.take(order, axis=0)\n expected = df.reindex(df.index.take(order))\n assert_frame_equal(result, expected)\n\n # axis = 1\n result = df.take(order, axis=1)\n expected = df.ix[:, ['B', 'C', 'A', 'D']]\n assert_frame_equal(result, expected)\n\n def test_iterkv_deprecation(self):\n with tm.assert_produces_warning(DeprecationWarning):\n self.mixed_float.iterkv()\n\n def test_iterkv_names(self):\n for k, v in compat.iteritems(self.mixed_frame):\n self.assertEqual(v.name, k)\n\n def test_series_put_names(self):\n series = self.mixed_frame._series\n for k, v in compat.iteritems(series):\n self.assertEqual(v.name, k)\n\n def test_dot(self):\n a = DataFrame(np.random.randn(3, 4), index=['a', 'b', 'c'],\n columns=['p', 'q', 'r', 's'])\n b = DataFrame(np.random.randn(4, 2), index=['p', 'q', 'r', 's'],\n columns=['one', 'two'])\n\n result = a.dot(b)\n expected = DataFrame(np.dot(a.values, b.values),\n index=['a', 'b', 'c'],\n columns=['one', 'two'])\n # Check alignment\n b1 = b.reindex(index=reversed(b.index))\n result = a.dot(b)\n assert_frame_equal(result, expected)\n\n # Check series argument\n result = a.dot(b['one'])\n assert_series_equal(result, expected['one'])\n result = a.dot(b1['one'])\n assert_series_equal(result, expected['one'])\n\n # can pass correct-length arrays\n row = a.ix[0].values\n\n result = a.dot(row)\n exp = a.dot(a.ix[0])\n assert_series_equal(result, exp)\n\n with assertRaisesRegexp(ValueError, 'Dot product shape mismatch'):\n a.dot(row[:-1])\n\n a = np.random.rand(1, 5)\n b = np.random.rand(5, 1)\n A = DataFrame(a)\n B = DataFrame(b)\n\n # it works\n result = A.dot(b)\n\n # unaligned\n df = DataFrame(randn(3, 4), index=[1, 2, 3], columns=lrange(4))\n df2 = DataFrame(randn(5, 3), index=lrange(5), columns=[1, 2, 3])\n\n assertRaisesRegexp(ValueError, 'aligned', df.dot, df2)\n\n def test_idxmin(self):\n frame = self.frame\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmin(axis=axis, skipna=skipna)\n expected = df.apply(\n Series.idxmin, axis=axis, skipna=skipna)\n assert_series_equal(result, expected)\n\n self.assertRaises(ValueError, frame.idxmin, axis=2)\n\n def test_idxmax(self):\n frame = self.frame\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n for skipna in [True, False]:\n for axis in [0, 1]:\n for df in [frame, self.intframe]:\n result = df.idxmax(axis=axis, skipna=skipna)\n expected = df.apply(\n Series.idxmax, axis=axis, skipna=skipna)\n assert_series_equal(result, expected)\n\n self.assertRaises(ValueError, frame.idxmax, axis=2)\n\n def test_stale_cached_series_bug_473(self):\n Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),\n columns=('e', 'f', 'g', 'h'))\n repr(Y)\n Y['e'] = Y['e'].astype('object')\n Y['g']['c'] = np.NaN\n repr(Y)\n result = Y.sum()\n exp = Y['g'].sum()\n self.assertTrue(isnull(Y['g']['c']))\n\n def test_index_namedtuple(self):\n from collections import namedtuple\n IndexType = 
namedtuple(\"IndexType\", [\"a\", \"b\"])\n idx1 = IndexType(\"foo\", \"bar\")\n idx2 = IndexType(\"baz\", \"bof\")\n index = Index([idx1, idx2],\n name=\"composite_index\", tupleize_cols=False)\n df = DataFrame([(1, 2), (3, 4)], index=index, columns=[\"A\", \"B\"])\n self.assertEqual(df.ix[IndexType(\"foo\", \"bar\")][\"A\"], 1)\n\n def test_empty_nonzero(self):\n df = DataFrame([1, 2, 3])\n self.assertFalse(df.empty)\n df = DataFrame(index=['a', 'b'], columns=['c', 'd']).dropna()\n self.assertTrue(df.empty)\n self.assertTrue(df.T.empty)\n\n def test_any_all(self):\n self._check_bool_op('any', np.any, has_skipna=True, has_bool_only=True)\n self._check_bool_op('all', np.all, has_skipna=True, has_bool_only=True)\n\n df = DataFrame(randn(10, 4)) > 0\n df.any(1)\n df.all(1)\n df.any(1, bool_only=True)\n df.all(1, bool_only=True)\n\n # skip pathological failure cases\n # class CantNonzero(object):\n\n # def __nonzero__(self):\n # raise ValueError\n\n # df[4] = CantNonzero()\n\n # it works!\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n # df[4][4] = np.nan\n # df.any(1)\n # df.all(1)\n # df.any(1, bool_only=True)\n # df.all(1, bool_only=True)\n\n def test_consolidate_datetime64(self):\n # numpy vstack bug\n\n data = \"\"\"\\\nstarting,ending,measure\n2012-06-21 00:00,2012-06-23 07:00,77\n2012-06-23 07:00,2012-06-23 16:30,65\n2012-06-23 16:30,2012-06-25 08:00,77\n2012-06-25 08:00,2012-06-26 12:00,0\n2012-06-26 12:00,2012-06-27 08:00,77\n\"\"\"\n df = read_csv(StringIO(data), parse_dates=[0, 1])\n\n ser_starting = df.starting\n ser_starting.index = ser_starting.values\n ser_starting = ser_starting.tz_localize('US/Eastern')\n ser_starting = ser_starting.tz_convert('UTC')\n\n ser_ending = df.ending\n ser_ending.index = ser_ending.values\n ser_ending = ser_ending.tz_localize('US/Eastern')\n ser_ending = ser_ending.tz_convert('UTC')\n\n df.starting = ser_starting.index\n df.ending = ser_ending.index\n\n assert_array_equal(df.starting.values, ser_starting.index.values)\n assert_array_equal(df.ending.values, ser_ending.index.values)\n\n def test_tslib_tz_convert_trans_pos_plus_1__bug(self):\n # Regression test for tslib.tz_convert(vals, tz1, tz2).\n # See https://github.com/pydata/pandas/issues/4496 for details.\n idx = pd.date_range(datetime(2011, 3, 26, 23), datetime(2011, 3, 27, 1), freq='1min')\n idx = idx.tz_localize('UTC')\n idx = idx.tz_convert('Europe/Moscow')\n\n test_vector = pd.Series([3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3,\n 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,\n 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,\n 4, 4, 4, 4, 4, 4, 4, 4, 5], dtype=int)\n\n hours = idx.hour\n\n np.testing.assert_equal(hours, test_vector.values)\n\n def _check_bool_op(self, name, alternative, frame=None, has_skipna=True,\n has_bool_only=False):\n if frame is None:\n frame = self.frame > 0\n # set some NAs\n frame = DataFrame(frame.values.astype(object), frame.index,\n frame.columns)\n frame.ix[5:10] = np.nan\n frame.ix[15:20, -2:] = np.nan\n\n f = getattr(frame, name)\n\n if has_skipna:\n def skipna_wrapper(x):\n nona = x.dropna().values\n return alternative(nona)\n\n def wrapper(x):\n return alternative(x.values)\n\n result0 = f(axis=0, skipna=False)\n result1 = f(axis=1, skipna=False)\n assert_series_equal(result0, frame.apply(wrapper))\n 
assert_series_equal(result1, frame.apply(wrapper, axis=1),\n check_dtype=False) # HACK: win32\n else:\n skipna_wrapper = alternative\n wrapper = alternative\n\n result0 = f(axis=0)\n result1 = f(axis=1)\n assert_series_equal(result0, frame.apply(skipna_wrapper))\n assert_series_equal(result1, frame.apply(skipna_wrapper, axis=1),\n check_dtype=False)\n\n # result = f(axis=1)\n # comp = frame.apply(alternative, axis=1).reindex(result.index)\n # assert_series_equal(result, comp)\n\n # bad axis\n self.assertRaises(ValueError, f, axis=2)\n\n # make sure works on mixed-type frame\n mixed = self.mixed_frame\n mixed['_bool_'] = np.random.randn(len(mixed)) > 0\n getattr(mixed, name)(axis=0)\n getattr(mixed, name)(axis=1)\n\n class NonzeroFail:\n\n def __nonzero__(self):\n raise ValueError\n\n mixed['_nonzero_fail_'] = NonzeroFail()\n\n if has_bool_only:\n getattr(mixed, name)(axis=0, bool_only=True)\n getattr(mixed, name)(axis=1, bool_only=True)\n getattr(frame, name)(axis=0, bool_only=False)\n getattr(frame, name)(axis=1, bool_only=False)\n\n # all NA case\n if has_skipna:\n all_na = frame * np.NaN\n r0 = getattr(all_na, name)(axis=0)\n r1 = getattr(all_na, name)(axis=1)\n if name == 'any':\n self.assertFalse(r0.any())\n self.assertFalse(r1.any())\n else:\n self.assertTrue(r0.all())\n self.assertTrue(r1.all())\n\n def test_strange_column_corruption_issue(self):\n df = DataFrame(index=[0, 1])\n df[0] = nan\n wasCol = {}\n # uncommenting these makes the results match\n # for col in xrange(100, 200):\n # wasCol[col] = 1\n # df[col] = nan\n\n for i, dt in enumerate(df.index):\n for col in range(100, 200):\n if not col in wasCol:\n wasCol[col] = 1\n df[col] = nan\n df[col][dt] = i\n\n myid = 100\n\n first = len(df.ix[isnull(df[myid]), [myid]])\n second = len(df.ix[isnull(df[myid]), [myid]])\n self.assertTrue(first == second == 0)\n\n def test_inplace_return_self(self):\n # re #1893\n\n data = DataFrame({'a': ['foo', 'bar', 'baz', 'qux'],\n 'b': [0, 0, 1, 1],\n 'c': [1, 2, 3, 4]})\n\n def _check_f(base, f):\n result = f(base)\n self.assertTrue(result is None)\n\n # -----DataFrame-----\n\n # set_index\n f = lambda x: x.set_index('a', inplace=True)\n _check_f(data.copy(), f)\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True)\n _check_f(data.set_index('a'), f)\n\n # drop_duplicates\n f = lambda x: x.drop_duplicates(inplace=True)\n _check_f(data.copy(), f)\n\n # sort\n f = lambda x: x.sort('b', inplace=True)\n _check_f(data.copy(), f)\n\n # sort_index\n f = lambda x: x.sort_index(inplace=True)\n _check_f(data.copy(), f)\n\n # sortlevel\n f = lambda x: x.sortlevel(0, inplace=True)\n _check_f(data.set_index(['a', 'b']), f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(data.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(data.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(data.copy(), f)\n\n # -----Series-----\n d = data.copy()['c']\n\n # reset_index\n f = lambda x: x.reset_index(inplace=True, drop=True)\n _check_f(data.set_index('a')['c'], f)\n\n # fillna\n f = lambda x: x.fillna(0, inplace=True)\n _check_f(d.copy(), f)\n\n # replace\n f = lambda x: x.replace(1, 0, inplace=True)\n _check_f(d.copy(), f)\n\n # rename\n f = lambda x: x.rename({1: 'foo'}, inplace=True)\n _check_f(d.copy(), f)\n\n def test_isin(self):\n # GH #4211\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n other = ['a', 'b', 'c']\n\n result = 
df.isin(other)\n expected = DataFrame([df.loc[s].isin(other) for s in df.index])\n assert_frame_equal(result, expected)\n\n def test_isin_empty(self):\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n result = df.isin([])\n expected = pd.DataFrame(False, df.index, df.columns)\n assert_frame_equal(result, expected)\n\n def test_isin_dict(self):\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n d = {'A': ['a']}\n\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n\n result = df.isin(d)\n assert_frame_equal(result, expected)\n\n # non unique columns\n df = DataFrame({'A': ['a', 'b', 'c'], 'B': ['a', 'e', 'f']})\n df.columns = ['A', 'A']\n expected = DataFrame(False, df.index, df.columns)\n expected.loc[0, 'A'] = True\n result = df.isin(d)\n assert_frame_equal(result, expected)\n\n def test_isin_with_string_scalar(self):\n #GH4763\n df = DataFrame({'vals': [1, 2, 3, 4], 'ids': ['a', 'b', 'f', 'n'],\n 'ids2': ['a', 'n', 'c', 'n']},\n index=['foo', 'bar', 'baz', 'qux'])\n with tm.assertRaises(TypeError):\n df.isin('a')\n\n with tm.assertRaises(TypeError):\n df.isin('aaa')\n\n def test_isin_df(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n df2 = DataFrame({'A': [0, 2, 12, 4], 'B': [2, np.nan, 4, 5]})\n expected = DataFrame(False, df1.index, df1.columns)\n result = df1.isin(df2)\n expected['A'].loc[[1, 3]] = True\n expected['B'].loc[[0, 2]] = True\n assert_frame_equal(result, expected)\n\n # partial overlapping columns\n df2.columns = ['A', 'C']\n result = df1.isin(df2)\n expected['B'] = False\n assert_frame_equal(result, expected)\n\n def test_isin_df_dupe_values(self):\n df1 = DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]})\n # just cols duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['B', 'B'])\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n # just index duped\n df2 = DataFrame([[0, 2], [12, 4], [2, np.nan], [4, 5]],\n columns=['A', 'B'], index=[0, 0, 1, 1])\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n # cols and index:\n df2.columns = ['B', 'B']\n with tm.assertRaises(ValueError):\n df1.isin(df2)\n\n def test_isin_dupe_self(self):\n other = DataFrame({'A': [1, 0, 1, 0], 'B': [1, 1, 0, 0]})\n df = DataFrame([[1, 1], [1, 0], [0, 0]], columns=['A','A'])\n result = df.isin(other)\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected.loc[0] = True\n expected.iloc[1, 1] = True\n assert_frame_equal(result, expected)\n\n def test_isin_against_series(self):\n df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [2, np.nan, 4, 4]},\n index=['a', 'b', 'c', 'd'])\n s = pd.Series([1, 3, 11, 4], index=['a', 'b', 'c', 'd'])\n expected = DataFrame(False, index=df.index, columns=df.columns)\n expected['A'].loc['a'] = True\n expected.loc['d'] = True\n result = df.isin(s)\n assert_frame_equal(result, expected)\n\n def test_isin_multiIndex(self):\n idx = MultiIndex.from_tuples([(0, 'a', 'foo'), (0, 'a', 'bar'),\n (0, 'b', 'bar'), (0, 'b', 'baz'),\n (2, 'a', 'foo'), (2, 'a', 'bar'),\n (2, 'c', 'bar'), (2, 'c', 'baz'),\n (1, 'b', 'foo'), (1, 'b', 'bar'),\n (1, 'c', 'bar'), (1, 'c', 'baz')])\n df1 = DataFrame({'A': np.ones(12),\n 'B': np.zeros(12)}, index=idx)\n df2 = DataFrame({'A': [1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1],\n 'B': [1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1]})\n # against regular index\n expected = DataFrame(False, index=df1.index, columns=df1.columns)\n result = df1.isin(df2)\n assert_frame_equal(result, expected)\n\n df2.index = idx\n expected = 
df2.values.astype(np.bool)\n expected[:, 1] = ~expected[:, 1]\n expected = DataFrame(expected, columns=['A', 'B'], index=idx)\n\n result = df1.isin(df2)\n assert_frame_equal(result, expected)\n\n def test_to_csv_date_format(self):\n from pandas import to_datetime\n pname = '__tmp_to_csv_date_format__'\n with ensure_clean(pname) as path:\n for engine in [None, 'python']:\n dt_index = self.tsframe.index\n datetime_frame = DataFrame({'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)\n\n datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)\n # Check that the data was put in the specified format\n test = read_csv(path, index_col=0)\n\n datetime_frame_int = datetime_frame.applymap(lambda x: int(x.strftime('%Y%m%d')))\n datetime_frame_int.index = datetime_frame_int.index.map(lambda x: int(x.strftime('%Y%m%d')))\n\n assert_frame_equal(test, datetime_frame_int)\n\n datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)\n # Check that the data was put in the specified format\n test = read_csv(path, index_col=0)\n datetime_frame_str = datetime_frame.applymap(lambda x: x.strftime('%Y-%m-%d'))\n datetime_frame_str.index = datetime_frame_str.index.map(lambda x: x.strftime('%Y-%m-%d'))\n\n assert_frame_equal(test, datetime_frame_str)\n\n # Check that columns get converted\n datetime_frame_columns = datetime_frame.T\n\n datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)\n\n test = read_csv(path, index_col=0)\n\n datetime_frame_columns = datetime_frame_columns.applymap(lambda x: int(x.strftime('%Y%m%d')))\n # Columns don't get converted to ints by read_csv\n datetime_frame_columns.columns = datetime_frame_columns.columns.map(lambda x: x.strftime('%Y%m%d'))\n\n assert_frame_equal(test, datetime_frame_columns)\n\n # test NaTs\n nat_index = to_datetime(['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])\n nat_frame = DataFrame({'A': nat_index}, index=nat_index)\n\n nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)\n\n test = read_csv(path, parse_dates=[0, 1], index_col=0)\n\n assert_frame_equal(test, nat_frame)\n\n def test_concat_empty_dataframe_dtypes(self):\n df = DataFrame(columns=list(\"abc\"))\n df['a'] = df['a'].astype(np.bool_)\n df['b'] = df['b'].astype(np.int32)\n df['c'] = df['c'].astype(np.float64)\n\n result = pd.concat([df, df])\n self.assertEqual(result['a'].dtype, np.bool_)\n self.assertEqual(result['b'].dtype, np.int32)\n self.assertEqual(result['c'].dtype, np.float64)\n\n result = pd.concat([df, df.astype(np.float64)])\n self.assertEqual(result['a'].dtype, np.object_)\n self.assertEqual(result['b'].dtype, np.float64)\n self.assertEqual(result['c'].dtype, np.float64)\n\n def test_empty_frame_dtypes_ftypes(self):\n empty_df = pd.DataFrame()\n assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))\n assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))\n\n nocols_df = pd.DataFrame(index=[1,2,3])\n assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))\n assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))\n\n norows_df = pd.DataFrame(columns=list(\"abc\"))\n assert_series_equal(norows_df.dtypes, pd.Series(np.object, index=list(\"abc\")))\n assert_series_equal(norows_df.ftypes, pd.Series('object:dense', index=list(\"abc\")))\n\n norows_int_df = pd.DataFrame(columns=list(\"abc\")).astype(np.int32)\n assert_series_equal(norows_int_df.dtypes, pd.Series(np.dtype('int32'), index=list(\"abc\")))\n assert_series_equal(norows_int_df.ftypes, pd.Series('int32:dense', index=list(\"abc\")))\n\n 
odict = OrderedDict\n df = pd.DataFrame(odict([('a', 1), ('b', True), ('c', 1.0)]), index=[1, 2, 3])\n assert_series_equal(df.dtypes, pd.Series(odict([('a', np.int64),\n ('b', np.bool),\n ('c', np.float64)])))\n assert_series_equal(df.ftypes, pd.Series(odict([('a', 'int64:dense'),\n ('b', 'bool:dense'),\n ('c', 'float64:dense')])))\n\n # same but for empty slice of df\n assert_series_equal(df[:0].dtypes, pd.Series(odict([('a', np.int64),\n ('b', np.bool),\n ('c', np.float64)])))\n assert_series_equal(df[:0].ftypes, pd.Series(odict([('a', 'int64:dense'),\n ('b', 'bool:dense'),\n ('c', 'float64:dense')])))\n\n def test_dtypes_are_correct_after_column_slice(self):\n # GH6525\n df = pd.DataFrame(index=range(5), columns=list(\"abc\"), dtype=np.float_)\n odict = OrderedDict\n assert_series_equal(df.dtypes,\n pd.Series(odict([('a', np.float_), ('b', np.float_),\n ('c', np.float_),])))\n assert_series_equal(df.iloc[:,2:].dtypes,\n pd.Series(odict([('c', np.float_)])))\n assert_series_equal(df.dtypes,\n pd.Series(odict([('a', np.float_), ('b', np.float_),\n ('c', np.float_),])))\n\n def test_set_index_names(self):\n df = pd.util.testing.makeDataFrame()\n df.index.name = 'name'\n\n self.assertEqual(df.set_index(df.index).index.names, ['name'])\n\n mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B'])\n mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values,\n names=['A', 'B', 'A', 'B'])\n\n df = df.set_index(['A', 'B'])\n\n self.assertEqual(df.set_index(df.index).index.names, ['A', 'B'])\n\n # Check that set_index isn't converting a MultiIndex into an Index\n self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex))\n\n # Check actual equality\n tm.assert_index_equal(df.set_index(df.index).index, mi)\n\n # Check that [MultiIndex, MultiIndex] yields a MultiIndex rather\n # than a pair of tuples\n self.assertTrue(isinstance(df.set_index([df.index, df.index]).index, MultiIndex))\n\n # Check equality\n tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2)\n\n def test_select_dtypes_include(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True]})\n ri = df.select_dtypes(include=[np.number])\n ei = df[['b', 'c', 'd']]\n tm.assert_frame_equal(ri, ei)\n\n def test_select_dtypes_exclude(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True]})\n re = df.select_dtypes(exclude=[np.number])\n ee = df[['a', 'e']]\n tm.assert_frame_equal(re, ee)\n\n def test_select_dtypes_exclude_include(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n exclude = np.datetime64,\n include = np.bool_, 'integer'\n r = df.select_dtypes(include=include, exclude=exclude)\n e = df[['b', 'c', 'e']]\n tm.assert_frame_equal(r, e)\n\n exclude = 'datetime',\n include = 'bool', 'int64', 'int32'\n r = df.select_dtypes(include=include, exclude=exclude)\n e = df[['b', 'e']]\n tm.assert_frame_equal(r, e)\n\n def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):\n tm._skip_if_not_numpy17_friendly()\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 
'f': pd.date_range('now', periods=3).values})\n df['g'] = df.f.diff()\n assert not hasattr(np, 'u8')\n r = df.select_dtypes(include=['i8', 'O'], exclude=['timedelta'])\n e = df[['a', 'b']]\n tm.assert_frame_equal(r, e)\n\n r = df.select_dtypes(include=['i8', 'O', 'timedelta64[ns]'])\n e = df[['a', 'b', 'g']]\n tm.assert_frame_equal(r, e)\n\n def test_select_dtypes_empty(self):\n df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})\n with tm.assertRaisesRegexp(ValueError, 'at least one of include or '\n 'exclude must be nonempty'):\n df.select_dtypes()\n\n def test_select_dtypes_raises_on_string(self):\n df = DataFrame({'a': list('abc'), 'b': list(range(1, 4))})\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n df.select_dtypes(include='object')\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n df.select_dtypes(exclude='object')\n with tm.assertRaisesRegexp(TypeError, 'include and exclude .+ non-'):\n df.select_dtypes(include=int, exclude='object')\n\n def test_select_dtypes_bad_datetime64(self):\n df = DataFrame({'a': list('abc'),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):\n df.select_dtypes(include=['datetime64[D]'])\n\n with tm.assertRaisesRegexp(ValueError, '.+ is too specific'):\n df.select_dtypes(exclude=['datetime64[as]'])\n\n def test_select_dtypes_str_raises(self):\n df = DataFrame({'a': list('abc'),\n 'g': list(u('abc')),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n string_dtypes = set((str, 'str', np.string_, 'S1',\n 'unicode', np.unicode_, 'U1'))\n try:\n string_dtypes.add(unicode)\n except NameError:\n pass\n for dt in string_dtypes:\n with tm.assertRaisesRegexp(TypeError,\n 'string dtypes are not allowed'):\n df.select_dtypes(include=[dt])\n with tm.assertRaisesRegexp(TypeError,\n 'string dtypes are not allowed'):\n df.select_dtypes(exclude=[dt])\n\n def test_select_dtypes_bad_arg_raises(self):\n df = DataFrame({'a': list('abc'),\n 'g': list(u('abc')),\n 'b': list(range(1, 4)),\n 'c': np.arange(3, 6).astype('u1'),\n 'd': np.arange(4.0, 7.0, dtype='float64'),\n 'e': [True, False, True],\n 'f': pd.date_range('now', periods=3).values})\n with tm.assertRaisesRegexp(TypeError, 'data type.*not understood'):\n df.select_dtypes(['blargy, blarg, blarg'])\n\n\ndef skip_if_no_ne(engine='numexpr'):\n if engine == 'numexpr':\n try:\n import numexpr as ne\n except ImportError:\n raise nose.SkipTest(\"cannot query engine numexpr when numexpr not \"\n \"installed\")\n\n\ndef skip_if_no_pandas_parser(parser):\n if parser != 'pandas':\n raise nose.SkipTest(\"cannot evaluate with parser {0!r}\".format(parser))\n\n\nclass TestDataFrameQueryWithMultiIndex(object):\n def check_query_with_named_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = tm.choice(['eggs', 'ham'], size=10)\n index = MultiIndex.from_arrays([a, b], names=['color', 'food'])\n df = DataFrame(randn(10, 2), index=index)\n ind = Series(df.index.get_level_values('color').values, index=index,\n name='color')\n\n # equality\n res1 = df.query('color == \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" == color', parser=parser, engine=engine)\n exp = df[ind == 'red']\n 
assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('color != \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" != color', parser=parser, engine=engine)\n exp = df[ind != 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('color == [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] == color', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('color != [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] != color', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"red\"] in color', parser=parser, engine=engine)\n res2 = df.query('\"red\" in color', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"red\"] not in color', parser=parser, engine=engine)\n res2 = df.query('\"red\" not in color', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n def test_query_with_named_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_named_multiindex, parser, engine\n\n def check_query_with_unnamed_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = tm.choice(['eggs', 'ham'], size=10)\n index = MultiIndex.from_arrays([a, b])\n df = DataFrame(randn(10, 2), index=index)\n ind = Series(df.index.get_level_values(0).values, index=index)\n\n res1 = df.query('ilevel_0 == \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" == ilevel_0', parser=parser, engine=engine)\n exp = df[ind == 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_0 != \"red\"', parser=parser, engine=engine)\n res2 = df.query('\"red\" != ilevel_0', parser=parser, engine=engine)\n exp = df[ind != 'red']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_0 == [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] == ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_0 != [\"red\"]', parser=parser, engine=engine)\n res2 = df.query('[\"red\"] != ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"red\"] in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('\"red\" in ilevel_0', parser=parser, engine=engine)\n exp = df[ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"red\"] not in ilevel_0', parser=parser, engine=engine)\n res2 = df.query('\"red\" not in ilevel_0', parser=parser, engine=engine)\n exp = df[~ind.isin(['red'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n #### LEVEL 1 ####\n ind = Series(df.index.get_level_values(1).values, index=index)\n res1 = df.query('ilevel_1 == \"eggs\"', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" == ilevel_1', parser=parser, 
engine=engine)\n exp = df[ind == 'eggs']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # inequality\n res1 = df.query('ilevel_1 != \"eggs\"', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" != ilevel_1', parser=parser, engine=engine)\n exp = df[ind != 'eggs']\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # list equality (really just set membership)\n res1 = df.query('ilevel_1 == [\"eggs\"]', parser=parser, engine=engine)\n res2 = df.query('[\"eggs\"] == ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('ilevel_1 != [\"eggs\"]', parser=parser, engine=engine)\n res2 = df.query('[\"eggs\"] != ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n # in/not in ops\n res1 = df.query('[\"eggs\"] in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" in ilevel_1', parser=parser, engine=engine)\n exp = df[ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n res1 = df.query('[\"eggs\"] not in ilevel_1', parser=parser, engine=engine)\n res2 = df.query('\"eggs\" not in ilevel_1', parser=parser, engine=engine)\n exp = df[~ind.isin(['eggs'])]\n assert_frame_equal(res1, exp)\n assert_frame_equal(res2, exp)\n\n def test_query_with_unnamed_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_unnamed_multiindex, parser, engine\n\n def check_query_with_partially_named_multiindex(self, parser, engine):\n tm.skip_if_no_ne(engine)\n a = tm.choice(['red', 'green'], size=10)\n b = np.arange(10)\n index = MultiIndex.from_arrays([a, b])\n index.names = [None, 'rating']\n df = DataFrame(randn(10, 2), index=index)\n res = df.query('rating == 1', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values('rating').values, index=index,\n name='rating')\n exp = df[ind == 1]\n assert_frame_equal(res, exp)\n\n res = df.query('rating != 1', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values('rating').values, index=index,\n name='rating')\n exp = df[ind != 1]\n assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 == \"red\"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind == \"red\"]\n assert_frame_equal(res, exp)\n\n res = df.query('ilevel_0 != \"red\"', parser=parser, engine=engine)\n ind = Series(df.index.get_level_values(0).values, index=index)\n exp = df[ind != \"red\"]\n assert_frame_equal(res, exp)\n\n def test_query_with_partially_named_multiindex(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_with_partially_named_multiindex, parser, engine\n\n def test_query_multiindex_get_index_resolvers(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_multiindex_get_index_resolvers, parser, engine\n\n def check_query_multiindex_get_index_resolvers(self, parser, engine):\n df = mkdf(10, 3, r_idx_nlevels=2, r_idx_names=['spam', 'eggs'])\n resolvers = df._get_index_resolvers()\n\n def to_series(mi, level):\n level_values = mi.get_level_values(level)\n s = level_values.to_series()\n s.index = mi\n return s\n\n col_series = df.columns.to_series()\n expected = {'index': df.index,\n 'columns': col_series,\n 'spam': to_series(df.index, 'spam'),\n 'eggs': to_series(df.index, 'eggs'),\n 'C0': col_series}\n for k, v in 
resolvers.items():\n if isinstance(v, Index):\n assert v.is_(expected[k])\n elif isinstance(v, Series):\n tm.assert_series_equal(v, expected[k])\n else:\n raise AssertionError(\"object must be a Series or Index\")\n\n def test_raise_on_panel_with_multiindex(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_raise_on_panel_with_multiindex, parser, engine\n\n def check_raise_on_panel_with_multiindex(self, parser, engine):\n tm.skip_if_no_ne()\n p = tm.makePanel(7)\n p.items = tm.makeCustomIndex(len(p.items), nlevels=2)\n with tm.assertRaises(NotImplementedError):\n pd.eval('p + 1', parser=parser, engine=engine)\n\n def test_raise_on_panel4d_with_multiindex(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_raise_on_panel4d_with_multiindex, parser, engine\n\n def check_raise_on_panel4d_with_multiindex(self, parser, engine):\n tm.skip_if_no_ne()\n p4d = tm.makePanel4D(7)\n p4d.items = tm.makeCustomIndex(len(p4d.items), nlevels=2)\n with tm.assertRaises(NotImplementedError):\n pd.eval('p4d + 1', parser=parser, engine=engine)\n\n\nclass TestDataFrameQueryNumExprPandas(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryNumExprPandas, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'pandas'\n tm.skip_if_no_ne(cls.engine)\n\n @classmethod\n def tearDownClass(cls):\n super(TestDataFrameQueryNumExprPandas, cls).tearDownClass()\n del cls.engine, cls.parser\n\n def test_date_query_with_attribute_access(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('@df.dates1 < 20130101 < @df.dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_query_no_attribute_access(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('dates1 < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n tm.assert_frame_equal(res, expec)\n\n def test_date_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates2'] = date_range('1/1/2013', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT\n res = df.query('dates1 < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 
3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.iloc[0, 0] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < dates3', engine=engine,\n parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self):\n engine, parser = self.engine, self.parser\n n = 10\n d = {}\n d['dates1'] = date_range('1/1/2012', periods=n)\n d['dates3'] = date_range('1/1/2014', periods=n)\n df = DataFrame(d)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('index < 20130101 < dates3', engine=engine, parser=parser)\n expec = df[(df.index.to_series() < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_query_with_non_date(self):\n engine, parser = self.engine, self.parser\n\n n = 10\n df = DataFrame({'dates': date_range('1/1/2012', periods=n),\n 'nondate': np.arange(n)})\n\n ops = '==', '!=', '<', '>', '<=', '>='\n\n for op in ops:\n with tm.assertRaises(TypeError):\n df.query('dates %s nondate' % op, parser=parser, engine=engine)\n\n def test_query_syntax_error(self):\n engine, parser = self.engine, self.parser\n df = DataFrame({\"i\": lrange(10), \"+\": lrange(3, 13),\n \"r\": lrange(4, 14)})\n with tm.assertRaises(SyntaxError):\n df.query('i - +', engine=engine, parser=parser)\n\n def test_query_scope(self):\n from pandas.computation.ops import UndefinedVariableError\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.randn(20, 2), columns=list('ab'))\n\n a, b = 1, 2\n res = df.query('a > b', engine=engine, parser=parser)\n expected = df[df.a > df.b]\n tm.assert_frame_equal(res, expected)\n\n res = df.query('@a > b', engine=engine, parser=parser)\n expected = df[a > df.b]\n tm.assert_frame_equal(res, expected)\n\n # no local variable c\n with tm.assertRaises(UndefinedVariableError):\n df.query('@a > b > @c', engine=engine, parser=parser)\n\n # no column named 'c'\n with tm.assertRaises(UndefinedVariableError):\n df.query('@a > b > c', engine=engine, parser=parser)\n\n def test_query_doesnt_pickup_local(self):\n from pandas.computation.ops import UndefinedVariableError\n\n engine, parser = self.engine, self.parser\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n from numpy import sin\n\n # we don't pick up the local 'sin'\n with tm.assertRaises(UndefinedVariableError):\n df.query('sin > 5', engine=engine, parser=parser)\n\n def test_query_builtin(self):\n from pandas.computation.engines import NumExprClobberingError\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n with tm.assertRaisesRegexp(NumExprClobberingError,\n 'Variables in expression.+'):\n df.query('sin > 5', engine=engine, parser=parser)\n\n def test_query(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randn(10, 3), columns=['a', 'b', 'c'])\n\n assert_frame_equal(df.query('a < b', engine=engine, parser=parser),\n df[df.a < df.b])\n assert_frame_equal(df.query('a + b > b * c', engine=engine,\n parser=parser),\n df[df.a + df.b > df.b * df.c])\n\n def test_query_index_with_name(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randint(10, size=(10, 3)),\n index=Index(range(10), 
name='blob'),\n columns=['a', 'b', 'c'])\n res = df.query('(blob < 5) & (a < b)', engine=engine, parser=parser)\n expec = df[(df.index < 5) & (df.a < df.b)]\n assert_frame_equal(res, expec)\n\n res = df.query('blob < b', engine=engine, parser=parser)\n expec = df[df.index < df.b]\n\n assert_frame_equal(res, expec)\n\n def test_query_index_without_name(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(np.random.randint(10, size=(10, 3)),\n index=range(10), columns=['a', 'b', 'c'])\n\n # \"index\" should refer to the index\n res = df.query('index < b', engine=engine, parser=parser)\n expec = df[df.index < df.b]\n assert_frame_equal(res, expec)\n\n # test against a scalar\n res = df.query('index < 5', engine=engine, parser=parser)\n expec = df[df.index < 5]\n assert_frame_equal(res, expec)\n\n def test_nested_scope(self):\n engine = self.engine\n parser = self.parser\n\n skip_if_no_pandas_parser(parser)\n\n df = DataFrame(np.random.randn(5, 3))\n df2 = DataFrame(np.random.randn(5, 3))\n expected = df[(df > 0) & (df2 > 0)]\n\n result = df.query('(@df > 0) & (@df2 > 0)', engine=engine, parser=parser)\n assert_frame_equal(result, expected)\n\n result = pd.eval('df[df > 0 and df2 > 0]', engine=engine,\n parser=parser)\n assert_frame_equal(result, expected)\n\n result = pd.eval('df[df > 0 and df2 > 0 and df[df > 0] > 0]',\n engine=engine, parser=parser)\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n assert_frame_equal(result, expected)\n\n result = pd.eval('df[(df>0) & (df2>0)]', engine=engine, parser=parser)\n expected = df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)\n assert_frame_equal(result, expected)\n\n def test_nested_raises_on_local_self_reference(self):\n from pandas.computation.ops import UndefinedVariableError\n\n df = DataFrame(np.random.randn(5, 3))\n\n # can't reference ourself b/c we're a local so @ is necessary\n with tm.assertRaises(UndefinedVariableError):\n df.query('df > 0', engine=self.engine, parser=self.parser)\n\n def test_local_syntax(self):\n skip_if_no_pandas_parser(self.parser)\n\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(100, 10), columns=list('abcdefghij'))\n b = 1\n expect = df[df.a < b]\n result = df.query('a < @b', engine=engine, parser=parser)\n assert_frame_equal(result, expect)\n\n expect = df[df.a < df.b]\n result = df.query('a < b', engine=engine, parser=parser)\n assert_frame_equal(result, expect)\n\n def test_chained_cmp_and_in(self):\n skip_if_no_pandas_parser(self.parser)\n engine, parser = self.engine, self.parser\n cols = list('abc')\n df = DataFrame(randn(100, len(cols)), columns=cols)\n res = df.query('a < b < c and a not in b not in c', engine=engine,\n parser=parser)\n ind = (df.a < df.b) & (df.b < df.c) & ~df.b.isin(df.a) & ~df.c.isin(df.b)\n expec = df[ind]\n assert_frame_equal(res, expec)\n\n def test_local_variable_with_in(self):\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n a = Series(np.random.randint(3, size=15), name='a')\n b = Series(np.random.randint(10, size=15), name='b')\n df = DataFrame({'a': a, 'b': b})\n\n expected = df.loc[(df.b - 1).isin(a)]\n result = df.query('b - 1 in a', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n b = Series(np.random.randint(10, size=15), name='b')\n expected = df.loc[(b - 1).isin(a)]\n result = df.query('@b - 1 in a', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n def test_at_inside_string(self):\n engine, parser = self.engine, self.parser\n 
skip_if_no_pandas_parser(parser)\n c = 1\n df = DataFrame({'a': ['a', 'a', 'b', 'b', '@c', '@c']})\n result = df.query('a == \"@c\"', engine=engine, parser=parser)\n expected = df[df.a == \"@c\"]\n tm.assert_frame_equal(result, expected)\n\n def test_query_undefined_local(self):\n from pandas.computation.ops import UndefinedVariableError\n engine, parser = self.engine, self.parser\n skip_if_no_pandas_parser(parser)\n df = DataFrame(np.random.rand(10, 2), columns=list('ab'))\n with tm.assertRaisesRegexp(UndefinedVariableError,\n \"local variable 'c' is not defined\"):\n df.query('a == @c', engine=engine, parser=parser)\n\n def test_index_resolvers_come_after_columns_with_the_same_name(self):\n n = 1\n a = np.r_[20:101:20]\n\n df = DataFrame({'index': a, 'b': np.random.randn(a.size)})\n df.index.name = 'index'\n result = df.query('index > 5', engine=self.engine, parser=self.parser)\n expected = df[df['index'] > 5]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({'index': a, 'b': np.random.randn(a.size)})\n result = df.query('ilevel_0 > 5', engine=self.engine, parser=self.parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({'a': a, 'b': np.random.randn(a.size)})\n df.index.name = 'a'\n result = df.query('a > 5', engine=self.engine, parser=self.parser)\n expected = df[df.a > 5]\n tm.assert_frame_equal(result, expected)\n\n result = df.query('index > 5', engine=self.engine, parser=self.parser)\n expected = df.loc[df.index[df.index > 5]]\n tm.assert_frame_equal(result, expected)\n\n def test_inf(self):\n n = 10\n df = DataFrame({'a': np.random.rand(n), 'b': np.random.rand(n)})\n df.loc[::2, 0] = np.inf\n ops = '==', '!='\n d = dict(zip(ops, (operator.eq, operator.ne)))\n for op, f in d.items():\n q = 'a %s inf' % op\n expected = df[f(df.a, np.inf)]\n result = df.query(q, engine=self.engine, parser=self.parser)\n tm.assert_frame_equal(result, expected)\n\n\nclass TestDataFrameQueryNumExprPython(TestDataFrameQueryNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryNumExprPython, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'python'\n tm.skip_if_no_ne(cls.engine)\n cls.frame = _frame.copy()\n\n def test_date_query_no_attribute_access(self):\n engine, parser = self.engine, self.parser\n df = DataFrame(randn(5, 3))\n df['dates1'] = date_range('1/1/2012', periods=5)\n df['dates2'] = date_range('1/1/2013', periods=5)\n df['dates3'] = date_range('1/1/2014', periods=5)\n res = df.query('(dates1 < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n tm.assert_frame_equal(res, expec)\n def test_date_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates2'] = date_range('1/1/2013', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.loc[np.random.rand(n) > 0.5, 'dates3'] = pd.NaT\n res = df.query('(dates1 < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.dates1 < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.set_index('dates1', inplace=True, 
drop=True)\n res = df.query('(index < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.iloc[0, 0] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n res = df.query('(index < 20130101) & (20130101 < dates3)',\n engine=engine, parser=parser)\n expec = df[(df.index < '20130101') & ('20130101' < df.dates3)]\n assert_frame_equal(res, expec)\n\n def test_date_index_query_with_NaT_duplicates(self):\n engine, parser = self.engine, self.parser\n n = 10\n df = DataFrame(randn(n, 3))\n df['dates1'] = date_range('1/1/2012', periods=n)\n df['dates3'] = date_range('1/1/2014', periods=n)\n df.loc[np.random.rand(n) > 0.5, 'dates1'] = pd.NaT\n df.set_index('dates1', inplace=True, drop=True)\n with tm.assertRaises(NotImplementedError):\n df.query('index < 20130101 < dates3', engine=engine, parser=parser)\n\n def test_nested_scope(self):\n from pandas.computation.ops import UndefinedVariableError\n engine = self.engine\n parser = self.parser\n # smoke test\n x = 1\n result = pd.eval('x + 1', engine=engine, parser=parser)\n self.assertEqual(result, 2)\n\n df = DataFrame(np.random.randn(5, 3))\n df2 = DataFrame(np.random.randn(5, 3))\n\n # don't have the pandas parser\n with tm.assertRaises(SyntaxError):\n df.query('(@df>0) & (@df2>0)', engine=engine, parser=parser)\n\n with tm.assertRaises(UndefinedVariableError):\n df.query('(df>0) & (df2>0)', engine=engine, parser=parser)\n\n expected = df[(df > 0) & (df2 > 0)]\n result = pd.eval('df[(df > 0) & (df2 > 0)]', engine=engine,\n parser=parser)\n tm.assert_frame_equal(expected, result)\n\n expected = df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]\n result = pd.eval('df[(df > 0) & (df2 > 0) & (df[df > 0] > 0)]',\n engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPandas(TestDataFrameQueryNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n cls.frame = _frame.copy()\n\n def test_query_builtin(self):\n from pandas.computation.engines import NumExprClobberingError\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n expected = df[df.index > 5]\n result = df.query('sin > 5', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nclass TestDataFrameQueryPythonPython(TestDataFrameQueryNumExprPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameQueryPythonPython, cls).setUpClass()\n cls.engine = cls.parser = 'python'\n cls.frame = _frame.copy()\n\n def test_query_builtin(self):\n from pandas.computation.engines import NumExprClobberingError\n engine, parser = self.engine, self.parser\n\n n = m = 10\n df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))\n\n df.index.name = 'sin'\n expected = df[df.index > 5]\n result = df.query('sin > 5', engine=engine, parser=parser)\n tm.assert_frame_equal(expected, result)\n\n\nPARSERS = 'python', 'pandas'\nENGINES = 'python', 'numexpr'\n\n\nclass TestDataFrameQueryStrings(object):\n def check_str_query_method(self, parser, engine):\n 
tm.skip_if_no_ne(engine)\n df = DataFrame(randn(10, 1), columns=['b'])\n df['strings'] = Series(list('aabbccddee'))\n expect = df[df.strings == 'a']\n\n if parser != 'pandas':\n col = 'strings'\n lst = '\"a\"'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = '==', '!='\n ops = 2 * ([eq] + [ne])\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)\n assertRaises(NotImplementedError, df.query, ex, engine=engine,\n parser=parser, local_dict={'strings': df.strings})\n else:\n res = df.query('\"a\" == strings', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('strings == \"a\"', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n assert_frame_equal(res, df[df.strings.isin(['a'])])\n\n expect = df[df.strings != 'a']\n res = df.query('strings != \"a\"', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('\"a\" != strings', engine=engine, parser=parser)\n assert_frame_equal(res, expect)\n assert_frame_equal(res, df[~df.strings.isin(['a'])])\n\n def test_str_query_method(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_str_query_method, parser, engine\n\n def test_str_list_query_method(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_str_list_query_method, parser, engine\n\n def check_str_list_query_method(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame(randn(10, 1), columns=['b'])\n df['strings'] = Series(list('aabbccddee'))\n expect = df[df.strings.isin(['a', 'b'])]\n\n if parser != 'pandas':\n col = 'strings'\n lst = '[\"a\", \"b\"]'\n\n lhs = [col] * 2 + [lst] * 2\n rhs = lhs[::-1]\n\n eq, ne = '==', '!='\n ops = 2 * ([eq] + [ne])\n\n for lhs, op, rhs in zip(lhs, ops, rhs):\n ex = '{lhs} {op} {rhs}'.format(lhs=lhs, op=op, rhs=rhs)\n with tm.assertRaises(NotImplementedError):\n df.query(ex, engine=engine, parser=parser)\n else:\n res = df.query('strings == [\"a\", \"b\"]', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('[\"a\", \"b\"] == strings', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n expect = df[~df.strings.isin(['a', 'b'])]\n\n res = df.query('strings != [\"a\", \"b\"]', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n res = df.query('[\"a\", \"b\"] != strings', engine=engine,\n parser=parser)\n assert_frame_equal(res, expect)\n\n def check_query_with_string_columns(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame({'a': list('aaaabbbbcccc'),\n 'b': list('aabbccddeeff'),\n 'c': np.random.randint(5, size=12),\n 'd': np.random.randint(9, size=12)})\n if parser == 'pandas':\n res = df.query('a in b', parser=parser, engine=engine)\n expec = df[df.a.isin(df.b)]\n assert_frame_equal(res, expec)\n\n res = df.query('a in b and c < d', parser=parser, engine=engine)\n expec = df[df.a.isin(df.b) & (df.c < df.d)]\n assert_frame_equal(res, expec)\n else:\n with assertRaises(NotImplementedError):\n df.query('a in b', parser=parser, engine=engine)\n\n with assertRaises(NotImplementedError):\n df.query('a in b and c < d', parser=parser, engine=engine)\n\n def test_query_with_string_columns(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_string_columns, parser, engine\n\n def check_object_array_eq_ne(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = DataFrame({'a': list('aaaabbbbcccc'),\n 'b': list('aabbccddeeff'),\n 'c': np.random.randint(5, 
size=12),\n 'd': np.random.randint(9, size=12)})\n res = df.query('a == b', parser=parser, engine=engine)\n exp = df[df.a == df.b]\n assert_frame_equal(res, exp)\n\n res = df.query('a != b', parser=parser, engine=engine)\n exp = df[df.a != df.b]\n assert_frame_equal(res, exp)\n\n def test_object_array_eq_ne(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_object_array_eq_ne, parser, engine\n\n def check_query_with_nested_strings(self, parser, engine):\n tm.skip_if_no_ne(engine)\n skip_if_no_pandas_parser(parser)\n from pandas.compat import StringIO\n raw = \"\"\"id event timestamp\n 1 \"page 1 load\" 1/1/2014 0:00:01\n 1 \"page 1 exit\" 1/1/2014 0:00:31\n 2 \"page 2 load\" 1/1/2014 0:01:01\n 2 \"page 2 exit\" 1/1/2014 0:01:31\n 3 \"page 3 load\" 1/1/2014 0:02:01\n 3 \"page 3 exit\" 1/1/2014 0:02:31\n 4 \"page 1 load\" 2/1/2014 1:00:01\n 4 \"page 1 exit\" 2/1/2014 1:00:31\n 5 \"page 2 load\" 2/1/2014 1:01:01\n 5 \"page 2 exit\" 2/1/2014 1:01:31\n 6 \"page 3 load\" 2/1/2014 1:02:01\n 6 \"page 3 exit\" 2/1/2014 1:02:31\n \"\"\"\n df = pd.read_csv(StringIO(raw), sep=r'\\s{2,}', engine='python',\n parse_dates=['timestamp'])\n expected = df[df.event == '\"page 1 load\"']\n res = df.query(\"\"\"'\"page 1 load\"' in event\"\"\", parser=parser,\n engine=engine)\n tm.assert_frame_equal(expected, res)\n\n def test_query_with_nested_string(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_nested_strings, parser, engine\n\n def check_query_with_nested_special_character(self, parser, engine):\n skip_if_no_pandas_parser(parser)\n tm.skip_if_no_ne(engine)\n df = DataFrame({'a': ['a', 'b', 'test & test'],\n 'b': [1, 2, 3]})\n res = df.query('a == \"test & test\"', parser=parser, engine=engine)\n expec = df[df.a == 'test & test']\n tm.assert_frame_equal(res, expec)\n\n def test_query_with_nested_special_character(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_with_nested_special_character, parser, engine\n\n def check_query_lex_compare_strings(self, parser, engine):\n tm.skip_if_no_ne(engine=engine)\n import operator as opr\n\n a = Series(tm.choice(list('abcde'), 20))\n b = Series(np.arange(a.size))\n df = DataFrame({'X': a, 'Y': b})\n\n ops = {'<': opr.lt, '>': opr.gt, '<=': opr.le, '>=': opr.ge}\n\n for op, func in ops.items():\n res = df.query('X %s \"d\"' % op, engine=engine, parser=parser)\n expected = df[func(df.X, 'd')]\n assert_frame_equal(res, expected)\n\n def test_query_lex_compare_strings(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_lex_compare_strings, parser, engine\n\n def check_query_single_element_booleans(self, parser, engine):\n tm.skip_if_no_ne(engine)\n columns = 'bid', 'bidsize', 'ask', 'asksize'\n data = np.random.randint(2, size=(1, len(columns))).astype(bool)\n df = DataFrame(data, columns=columns)\n res = df.query('bid & ask', engine=engine, parser=parser)\n expected = df[df.bid & df.ask]\n assert_frame_equal(res, expected)\n\n def test_query_single_element_booleans(self):\n for parser, engine in product(PARSERS, ENGINES):\n yield self.check_query_single_element_booleans, parser, engine\n\n def check_query_string_scalar_variable(self, parser, engine):\n tm.skip_if_no_ne(engine)\n df = pd.DataFrame({'Symbol': ['BUD US', 'BUD US', 'IBM US', 'IBM US'],\n 'Price': [109.70, 109.72, 183.30, 183.35]})\n e = df[df.Symbol == 'BUD US']\n symb = 'BUD US'\n r = df.query('Symbol == @symb', parser=parser, engine=engine)\n tm.assert_frame_equal(e, r)\n\n def 
test_query_string_scalar_variable(self):\n for parser, engine in product(['pandas'], ENGINES):\n yield self.check_query_string_scalar_variable, parser, engine\n\n\nclass TestDataFrameEvalNumExprPandas(tm.TestCase):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalNumExprPandas, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'pandas'\n tm.skip_if_no_ne()\n\n def setUp(self):\n self.frame = DataFrame(randn(10, 3), columns=list('abc'))\n\n def tearDown(self):\n del self.frame\n\n def test_simple_expr(self):\n res = self.frame.eval('a + b', engine=self.engine, parser=self.parser)\n expect = self.frame.a + self.frame.b\n assert_series_equal(res, expect)\n\n def test_bool_arith_expr(self):\n res = self.frame.eval('a[a < 1] + b', engine=self.engine,\n parser=self.parser)\n expect = self.frame.a[self.frame.a < 1] + self.frame.b\n assert_series_equal(res, expect)\n\n def test_invalid_type_for_operator_raises(self):\n df = DataFrame({'a': [1, 2], 'b': ['c', 'd']})\n ops = '+', '-', '*', '/'\n for op in ops:\n with tm.assertRaisesRegexp(TypeError,\n \"unsupported operand type\\(s\\) for \"\n \".+: '.+' and '.+'\"):\n df.eval('a {0} b'.format(op), engine=self.engine,\n parser=self.parser)\n\n\nclass TestDataFrameEvalNumExprPython(TestDataFrameEvalNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalNumExprPython, cls).setUpClass()\n cls.engine = 'numexpr'\n cls.parser = 'python'\n tm.skip_if_no_ne(cls.engine)\n\n\nclass TestDataFrameEvalPythonPandas(TestDataFrameEvalNumExprPandas):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalPythonPandas, cls).setUpClass()\n cls.engine = 'python'\n cls.parser = 'pandas'\n\n\nclass TestDataFrameEvalPythonPython(TestDataFrameEvalNumExprPython):\n\n @classmethod\n def setUpClass(cls):\n super(TestDataFrameEvalPythonPython, cls).tearDownClass()\n cls.engine = cls.parser = 'python'\n\n\nif __name__ == '__main__':\n nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],\n exit=False)\n"
] | [
[
"pandas.util.testing.choice",
"pandas.merge",
"pandas.core.format.set_option",
"numpy.sqrt",
"pandas.DataFrame.from_csv",
"pandas.util.testing.assert_produces_warning",
"pandas.MultiIndex.from_tuples",
"pandas.compat.lzip",
"numpy.where",
"pandas.compat.OrderedDict",
"pandas.compat.text_type",
"pandas.tseries.tools.to_datetime",
"numpy.zeros",
"pandas.DataFrame.from_items",
"pandas.concat",
"pandas.core.nanops.nansem",
"pandas.MultiIndex",
"numpy.median",
"pandas.util.testing.getMixedTypeDict",
"pandas.util.testing.getSeriesData",
"pandas.core.common.is_integer",
"pandas.date_range",
"scipy.stats.kurtosis",
"numpy.array",
"numpy.datetime64",
"numpy.testing.assert_array_equal",
"numpy.random.permutation",
"numpy.shape",
"numpy.ma.masked_array",
"numpy.isinf",
"pandas.compat.range",
"pandas.Series",
"pandas.util.testing.assert_isinstance",
"pandas.core.common.is_integer_dtype",
"numpy.asarray",
"pandas.util.testing.makePanel",
"numpy.core.records.fromarrays",
"numpy.var",
"pandas.reset_option",
"pandas.compat.StringIO",
"numpy.std",
"pandas.set_option",
"pandas.util.testing.equalContents",
"scipy.stats.skew",
"numpy.putmask",
"pandas.core.datetools.BDay",
"pandas.compat.u",
"pandas.util.testing.makeStringIndex",
"numpy.timedelta64",
"numpy.atleast_2d",
"numpy.random.rand",
"pandas.DataFrame.from_dict",
"numpy.corrcoef",
"numpy.ma.mrecords.fromarrays",
"pandas.util.misc.is_little_endian",
"numpy.array_equal",
"pandas.util.testing.assertRaisesRegexp",
"numpy.ma.copy",
"numpy.ones",
"pandas.util.testing.getTimeSeriesData",
"pandas.util.testing.rands",
"pandas.Period",
"numpy.isscalar",
"numpy.empty",
"pandas.core.nanops.nanvar",
"pandas.util.testing.assert_dict_equal",
"numpy.linspace",
"pandas.DataFrame",
"numpy.round",
"numpy.mean",
"pandas.compat.iteritems",
"pandas.DataFrame.from_records",
"numpy.random.randint",
"numpy.hstack",
"numpy.testing.assert_equal",
"pandas.Index",
"pandas.util.testing.assert_series_equal",
"pandas.util.testing._skip_if_no_scipy",
"numpy.lexsort",
"pandas.DatetimeIndex",
"numpy.repeat",
"pandas.compat.long",
"pandas.util.testing._skip_if_not_numpy17_friendly",
"pandas.notnull",
"numpy.isnan",
"pandas.util.testing.assert_almost_equal",
"numpy.cov",
"pandas.util.testing.skip_if_no_ne",
"pandas.util.testing.makeCustomDataframe",
"pandas.eval",
"pandas.isnull",
"numpy.tile",
"numpy.percentile",
"pandas.util.testing.assertRaises",
"pandas.util.testing.makePeriodFrame",
"pandas.compat.zip",
"pandas.core.common.pprint_thing",
"numpy.dot",
"pandas.to_datetime",
"pandas.util.testing.ensure_clean",
"numpy.dtype",
"pandas.util.testing.assert_frame_equal",
"numpy.random.randn",
"pandas.util.testing.makeDataFrame",
"pandas.util.testing.makeTimeDataFrame",
"pandas.util.testing.makePanel4D",
"pandas.read_csv",
"numpy.arange",
"pandas.core.common.is_float_dtype",
"pandas.compat.lmap",
"numpy.apply_along_axis",
"pandas.option_context",
"pandas.compat.cPickle.dumps",
"numpy.ma.masked_all",
"numpy.random.random",
"pandas.util.testing.makeTimeSeries",
"pandas.sparse.api.SparseDataFrame",
"numpy.abs",
"pandas.MultiIndex.from_arrays",
"pandas.core.common.isnull",
"pandas.Timestamp",
"pandas.compat.lrange"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"0.19",
"0.24",
"0.20"
],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
zeyh/neural-holography | [
"d2e399014aa80844edffd98bca34d2df80a69c84"
] | [
"main.py"
] | [
"\"\"\"\r\nNeural holography:\r\n\r\nThis is the main executive script used for the phase generation using Holonet/UNET or\r\n optimization using (GS/DPAC/SGD) + camera-in-the-loop (CITL).\r\n\r\nThis code and data is released under the Creative Commons Attribution-NonCommercial 4.0 International license (CC BY-NC.) In a nutshell:\r\n # The license is only for non-commercial use (commercial licenses can be obtained from Stanford).\r\n # The material is provided as-is, with no warranties whatsoever.\r\n # If you publish any code, data, or scientific work based on this, please cite our work.\r\n\r\n@article{Peng:2020:NeuralHolography,\r\nauthor = {Y. Peng, S. Choi, N. Padmanaban, G. Wetzstein},\r\ntitle = {{Neural Holography with Camera-in-the-loop Training}},\r\njournal = {ACM Trans. Graph. (SIGGRAPH Asia)},\r\nyear = {2020},\r\n}\r\n\r\n-----\r\n\r\n$ python main.py --channel=0 --algorithm=HOLONET --root_path=./phases --generator_dir=./pretrained_models\r\n\"\"\"\r\n\r\nimport os\r\nimport sys\r\nimport cv2\r\nimport torch\r\nimport torch.nn as nn\r\nimport configargparse\r\nfrom torch.utils.tensorboard import SummaryWriter\r\n\r\nimport utils.utils as utils\r\nfrom utils.augmented_image_loader import ImageLoader\r\nfrom propagation_model import ModelPropagate\r\nfrom utils.modules import SGD, GS, DPAC, PhysicalProp\r\nfrom holonet import HoloNet, InitialPhaseUnet, FinalPhaseOnlyUnet, PhaseOnlyUnet\r\nfrom propagation_ASM import propagation_ASM\r\n\r\n# Command line argument processing\r\np = configargparse.ArgumentParser()\r\np.add('-c', '--config_filepath', required=False, is_config_file=True, help='Path to config file.')\r\n\r\np.add_argument('--channel', type=int, default=1, help='Red:0, green:1, blue:2')\r\np.add_argument('--method', type=str, default='SGD', help='Type of algorithm, GS/SGD/DPAC/HOLONET/UNET')\r\np.add_argument('--prop_model', type=str, default='ASM', help='Type of propagation model, ASM or model')\r\np.add_argument('--root_path', type=str, default='./phases', help='Directory where optimized phases will be saved.')\r\np.add_argument('--data_path', type=str, default='./data', help='Directory for the dataset')\r\np.add_argument('--generator_dir', type=str, default='./pretrained_networks',\r\n help='Directory for the pretrained holonet/unet network')\r\np.add_argument('--prop_model_dir', type=str, default='./calibrated_models',\r\n help='Directory for the CITL-calibrated wave propagation models')\r\np.add_argument('--citl', type=utils.str2bool, default=False, help='Use of Camera-in-the-loop optimization with SGD')\r\np.add_argument('--experiment', type=str, default='', help='Name of experiment')\r\np.add_argument('--lr', type=float, default=8e-3, help='Learning rate for phase variables (for SGD)')\r\np.add_argument('--lr_s', type=float, default=2e-3, help='Learning rate for learnable scale (for SGD)')\r\np.add_argument('--num_iters', type=int, default=500, help='Number of iterations (GS, SGD)')\r\n\r\n# parse arguments\r\nopt = p.parse_args()\r\nrun_id = f'{opt.experiment}_{opt.method}_{opt.prop_model}' # {algorithm}_{prop_model} format\r\nif opt.citl:\r\n run_id = f'{run_id}_citl'\r\n\r\nchannel = opt.channel # Red:0 / Green:1 / Blue:2\r\nchan_str = ('red', 'green', 'blue')[channel]\r\n\r\nprint(f' - optimizing phase with {opt.method}/{opt.prop_model} ... 
')\r\nif opt.citl:\r\n print(f' - with camera-in-the-loop ...')\r\n\r\n# Hyperparameters setting\r\ncm, mm, um, nm = 1e-2, 1e-3, 1e-6, 1e-9\r\nprop_dist = (20 * cm, 20 * cm, 20 * cm)[channel] # propagation distance from SLM plane to target plane\r\nwavelength = (638 * nm, 520 * nm, 450 * nm)[channel] # wavelength of each color\r\nfeature_size = (6.4 * um, 6.4 * um) # SLM pitch\r\nslm_res = (1080, 1920) # resolution of SLM\r\nimage_res = (1080, 1920)\r\nroi_res = (880, 1600) # regions of interest (to penalize for SGD)\r\ndtype = torch.float32 # default datatype (Note: the result may be slightly different if you use float64, etc.)\r\ndevice = torch.device('cuda') # The gpu you are using\r\n\r\n# Options for the algorithm\r\nloss = nn.MSELoss().to(device) # loss functions to use (try other loss functions!)\r\ns0 = 1.0 # initial scale\r\n\r\nroot_path = os.path.join(opt.root_path, run_id, chan_str) # path for saving out optimized phases\r\n\r\n# Tensorboard writer\r\nsummaries_dir = os.path.join(root_path, 'summaries')\r\nutils.cond_mkdir(summaries_dir)\r\nwriter = SummaryWriter(summaries_dir)\r\n\r\n# Hardware setup for CITL\r\nif opt.citl:\r\n camera_prop = PhysicalProp(channel, laser_arduino=True, roi_res=(roi_res[1], roi_res[0]), slm_settle_time=0.12,\r\n range_row=(220, 1000), range_col=(300, 1630),\r\n patterns_path=f'F:/citl/calibration',\r\n show_preview=True)\r\nelse:\r\n camera_prop = None\r\n\r\n# Simulation model\r\nif opt.prop_model == 'ASM':\r\n propagator = propagation_ASM # Ideal model\r\n\r\nelif opt.prop_model.upper() == 'MODEL':\r\n blur = utils.make_kernel_gaussian(0.85, 3)\r\n propagator = ModelPropagate(distance=prop_dist, # Parameterized wave propagation model\r\n feature_size=feature_size,\r\n wavelength=wavelength,\r\n blur=blur).to(device)\r\n\r\n # load CITL-calibrated model\r\n propagator.load_state_dict(torch.load(f'{opt.prop_model_dir}/{chan_str}.pth', map_location=device))\r\n propagator.eval()\r\n\r\n\r\n# Select Phase generation method, algorithm\r\nif opt.method == 'SGD':\r\n phase_only_algorithm = SGD(prop_dist, wavelength, feature_size, opt.num_iters, roi_res, root_path,\r\n opt.prop_model, propagator, loss, opt.lr, opt.lr_s, s0, opt.citl, camera_prop, writer, device)\r\nelif opt.method == 'GS':\r\n phase_only_algorithm = GS(prop_dist, wavelength, feature_size, opt.num_iters, root_path,\r\n opt.prop_model, propagator, writer, device)\r\nelif opt.method == 'DPAC':\r\n phase_only_algorithm = DPAC(prop_dist, wavelength, feature_size, opt.prop_model, propagator, device)\r\nelif opt.method == 'HOLONET':\r\n phase_only_algorithm = HoloNet(prop_dist, wavelength, feature_size, initial_phase=InitialPhaseUnet(4, 16),\r\n final_phase_only=FinalPhaseOnlyUnet(4, 16, num_in=2)).to(device)\r\n model_path = os.path.join(opt.generator_dir, f'holonet20_{chan_str}.pth')\r\n image_res = (1072, 1920)\r\nelif opt.method == 'UNET':\r\n phase_only_algorithm = PhaseOnlyUnet(num_features_init=32).to(device)\r\n model_path = os.path.join(opt.generator_dir, f'unet20_{chan_str}.pth')\r\n image_res = (1024, 2048)\r\n\r\nif 'NET' in opt.method:\r\n checkpoint = torch.load(model_path)\r\n phase_only_algorithm.load_state_dict(checkpoint)\r\n phase_only_algorithm.eval()\r\n\r\n\r\n# Augmented image loader (if you want to shuffle, augment dataset, put options accordingly.)\r\nimage_loader = ImageLoader(opt.data_path, channel=channel,\r\n image_res=image_res, homography_res=roi_res,\r\n crop_to_homography=True,\r\n shuffle=False, vertical_flips=False, horizontal_flips=False)\r\n\r\n# Loop over 
the dataset\r\nfor k, target in enumerate(image_loader):\r\n # get target image\r\n target_amp, target_res, target_filename = target\r\n target_path, target_filename = os.path.split(target_filename[0])\r\n target_idx = target_filename.split('_')[-1]\r\n target_amp = target_amp.to(device)\r\n print(target_idx)\r\n\r\n # if you want to separate folders by target_idx or whatever, you can do so here.\r\n phase_only_algorithm.init_scale = s0 * utils.crop_image(target_amp, roi_res, stacked_complex=False).mean()\r\n phase_only_algorithm.phase_path = os.path.join(root_path)\r\n\r\n # run algorithm (See algorithm_modules.py and algorithms.py)\r\n if opt.method in ['DPAC', 'HOLONET', 'UNET']:\r\n # direct methods\r\n _, final_phase = phase_only_algorithm(target_amp)\r\n else:\r\n # iterative methods, initial phase: random guess\r\n init_phase = (-0.5 + 1.0 * torch.rand(1, 1, *slm_res)).to(device)\r\n final_phase = phase_only_algorithm(target_amp, init_phase)\r\n\r\n print(final_phase.shape)\r\n\r\n # save the final result somewhere.\r\n phase_out_8bit = utils.phasemap_8bit(final_phase.cpu().detach(), inverted=True)\r\n\r\n utils.cond_mkdir(root_path)\r\n cv2.imwrite(os.path.join(root_path, f'{target_idx}.png'), phase_out_8bit)\r\n\r\nprint(f' - Done, result: --root_path={root_path}')\r\n"
] | [
[
"torch.load",
"torch.utils.tensorboard.SummaryWriter",
"torch.rand",
"torch.device",
"torch.nn.MSELoss"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
perseveranceLX/iterative-dehaze | [
"dc5d56b2bf86460fff9d478e68368d2fffe78ba4"
] | [
"model/IPUDN_Grid.py"
] | [
"import torch\nimport torch.nn as nn\nimport math\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\ndef make_model():\n\treturn IPUDN()\n\nclass MeanShift(nn.Conv2d):\n\tdef __init__(self, rgb_range, rgb_mean=(0.4488, 0.4371, 0.4040), rgb_std=(1.0, 1.0, 1.0), sign=-1):\n\t\tsuper(MeanShift, self).__init__(3, 3, kernel_size=1)\n\t\tstd = torch.Tensor(rgb_std)\n\t\tself.weight.data = torch.eye(3).view(3, 3, 1, 1) / std.view(3, 1, 1, 1)\n\t\tself.bias.data = sign * rgb_range * torch.Tensor(rgb_mean) / std\n\t\tfor p in self.parameters():\n\t\t\tp.requires_grad = False\n\n\n\n# --- Build dense --- #\nclass MakeDense(nn.Module):\n\tdef __init__(self, in_channels, growth_rate, kernel_size=3):\n\t\tsuper(MakeDense, self).__init__()\n\t\tself.conv = nn.Conv2d(in_channels, growth_rate, kernel_size=kernel_size, padding=(kernel_size-1)//2)\n\n\tdef forward(self, x):\n\t\tout = F.relu(self.conv(x))\n\t\tout = torch.cat((x, out), 1)\n\t\treturn out\n\n\n# --- Build the Residual Dense Block --- #\nclass RDB(nn.Module):\n\tdef __init__(self, in_channels, num_dense_layer, growth_rate):\n\t\t\"\"\"\n\t\t:param in_channels: input channel size\n\t\t:param num_dense_layer: the number of RDB layers\n\t\t:param growth_rate: growth_rate\n\t\t\"\"\"\n\t\tsuper(RDB, self).__init__()\n\t\t_in_channels = in_channels\n\t\tmodules = []\n\t\tfor i in range(num_dense_layer):\n\t\t\tmodules.append(MakeDense(_in_channels, growth_rate))\n\t\t\t_in_channels += growth_rate\n\t\tself.residual_dense_layers = nn.Sequential(*modules)\n\t\tself.conv_1x1 = nn.Conv2d(_in_channels, in_channels, kernel_size=1, padding=0)\n\n\tdef forward(self, x):\n\t\tout = self.residual_dense_layers(x)\n\t\tout = self.conv_1x1(out)\n\t\tout = out + x\n\t\treturn out\n\n\n\n# --- Downsampling block in GridDehazeNet --- #\nclass DownSample(nn.Module):\n\tdef __init__(self, in_channels, kernel_size=3, stride=2):\n\t\tsuper(DownSample, self).__init__()\n\t\tself.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride=stride, padding=(kernel_size-1)//2)\n\t\tself.conv2 = nn.Conv2d(in_channels, stride*in_channels, kernel_size, stride=1, padding=(kernel_size - 1) // 2)\n\n\tdef forward(self, x):\n\t\tout = F.relu(self.conv1(x))\n\t\tout = F.relu(self.conv2(out))\n\t\treturn out\n\n\n# --- Upsampling block in GridDehazeNet --- #\nclass UpSample(nn.Module):\n\tdef __init__(self, in_channels, kernel_size=3, stride=2):\n\t\tsuper(UpSample, self).__init__()\n\t\tself.deconv = nn.ConvTranspose2d(in_channels, in_channels, kernel_size, stride=stride, padding=1)\n\t\tself.conv = nn.Conv2d(in_channels, in_channels // stride, kernel_size, stride=1, padding=(kernel_size - 1) // 2)\n\n\tdef forward(self, x, output_size):\n\t\tout = F.relu(self.deconv(x, output_size=output_size))\n\t\tout = F.relu(self.conv(out))\n\t\treturn out\n\n\n# --- Main model --- #\nclass GridDehazeNet(nn.Module):\n\tdef __init__(self, in_channels=3, depth_rate=16, kernel_size=3, stride=2, height=3, width=6, num_dense_layer=4, growth_rate=16, attention=True):\n\t\tsuper(GridDehazeNet, self).__init__()\n\t\tself.rdb_module = nn.ModuleDict()\n\t\tself.upsample_module = nn.ModuleDict()\n\t\tself.downsample_module = nn.ModuleDict()\n\t\tself.height = height\n\t\tself.width = width\n\t\tself.stride = stride\n\t\tself.depth_rate = depth_rate\n\t\tself.coefficient = nn.Parameter(torch.Tensor(np.ones((height, width, 2, depth_rate*stride**(height-1)))), requires_grad=attention)\n\t\tself.rdb_in = RDB(depth_rate, num_dense_layer, 
growth_rate)\n\t\tself.rdb_out = RDB(depth_rate, num_dense_layer, growth_rate)\n\n\t\t\n\n\t\trdb_in_channels = depth_rate\n\t\tfor i in range(height):\n\t\t\tfor j in range(width - 1):\n\t\t\t\tself.rdb_module.update({'{}_{}'.format(i, j): RDB(rdb_in_channels, num_dense_layer, growth_rate)})\n\t\t\trdb_in_channels *= stride\n\n\t\t_in_channels = depth_rate\n\t\tfor i in range(height - 1):\n\t\t\tfor j in range(width // 2):\n\t\t\t\tself.downsample_module.update({'{}_{}'.format(i, j): DownSample(_in_channels)})\n\t\t\t_in_channels *= stride\n\n\t\tfor i in range(height - 2, -1, -1):\n\t\t\tfor j in range(width // 2, width):\n\t\t\t\tself.upsample_module.update({'{}_{}'.format(i, j): UpSample(_in_channels)})\n\t\t\t_in_channels //= stride\n\n\tdef forward(self, inp):\n\t\t#x = self.sub_mean(x)\n\t\t#inp = self.conv_in(x)\n\n\t\tx_index = [[0 for _ in range(self.width)] for _ in range(self.height)]\n\t\ti, j = 0, 0\n\n\t\tx_index[0][0] = self.rdb_in(inp)\n\n\t\tfor j in range(1, self.width // 2):\n\t\t\tx_index[0][j] = self.rdb_module['{}_{}'.format(0, j-1)](x_index[0][j-1])\n\n\t\tfor i in range(1, self.height):\n\t\t\tx_index[i][0] = self.downsample_module['{}_{}'.format(i-1, 0)](x_index[i-1][0])\n\n\t\tfor i in range(1, self.height):\n\t\t\tfor j in range(1, self.width // 2):\n\t\t\t\tchannel_num = int(2**(i-1)*self.stride*self.depth_rate)\n\t\t\t\tx_index[i][j] = self.coefficient[i, j, 0, :channel_num][None, :, None, None] * self.rdb_module['{}_{}'.format(i, j-1)](x_index[i][j-1]) + \\\n\t\t\t\t\t\t\t\tself.coefficient[i, j, 1, :channel_num][None, :, None, None] * self.downsample_module['{}_{}'.format(i-1, j)](x_index[i-1][j])\n\n\t\tx_index[i][j+1] = self.rdb_module['{}_{}'.format(i, j)](x_index[i][j])\n\t\tk = j\n\n\t\tfor j in range(self.width // 2 + 1, self.width):\n\t\t\tx_index[i][j] = self.rdb_module['{}_{}'.format(i, j-1)](x_index[i][j-1])\n\n\t\tfor i in range(self.height - 2, -1, -1):\n\t\t\tchannel_num = int(2 ** (i-1) * self.stride * self.depth_rate)\n\t\t\tx_index[i][k+1] = self.coefficient[i, k+1, 0, :channel_num][None, :, None, None] * self.rdb_module['{}_{}'.format(i, k)](x_index[i][k]) + \\\n\t\t\t\t\t\t\t self.coefficient[i, k+1, 1, :channel_num][None, :, None, None] * self.upsample_module['{}_{}'.format(i, k+1)](x_index[i+1][k+1], x_index[i][k].size())\n\n\t\tfor i in range(self.height - 2, -1, -1):\n\t\t\tfor j in range(self.width // 2 + 1, self.width):\n\t\t\t\tchannel_num = int(2 ** (i - 1) * self.stride * self.depth_rate)\n\t\t\t\tx_index[i][j] = self.coefficient[i, j, 0, :channel_num][None, :, None, None] * self.rdb_module['{}_{}'.format(i, j-1)](x_index[i][j-1]) + \\\n\t\t\t\t\t\t\t\tself.coefficient[i, j, 1, :channel_num][None, :, None, None] * self.upsample_module['{}_{}'.format(i, j)](x_index[i+1][j], x_index[i][j-1].size())\n\n\t\tout = self.rdb_out(x_index[i][j])\n\t\t#out = F.relu(self.conv_out(out))\n\n\t\treturn out\n\nclass IPUDN(nn.Module):\n\tdef __init__(self):\n\t\tsuper(IPUDN, self).__init__()\n\t\tself.iteration = 6\n\n\t\tself.conv0 = nn.Sequential(nn.Conv2d(3+1+3+3+1+3, 16, 3, 1, 1), nn.ReLU(True))\n\t\tself.grid = GridDehazeNet()\n\t\tself.conv1 = nn.Conv2d(16, 3, 3, 1, 1)\n\n\t\tself.conv_i = nn.Sequential(nn.Conv2d(16 + 16, 16, 3, 1, 1), nn.Sigmoid())\n\t\tself.conv_f = nn.Sequential(nn.Conv2d(16 + 16, 16, 3, 1, 1), nn.Sigmoid())\n\t\tself.conv_g = nn.Sequential(nn.Conv2d(16 + 16, 16, 3, 1, 1), nn.Tanh())\n\t\tself.conv_o = nn.Sequential(nn.Conv2d(16 + 16, 16, 3, 1, 1), nn.Sigmoid())\n\n\t\tvgg_mean = (0.5, 0.5, 0.5)\n\t\tvgg_std = (0.5, 0.5, 
0.5)\n\t\tself.sub_mean = MeanShift(1.0, vgg_mean, vgg_std)\n\n\t\tself.trans_correction = nn.Sequential(\n\t\t\tnn.Conv2d(3+1+3+1, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 1, 3, 1, 1),\n\t\t\tnn.Tanh()\n\t\t\t)\n\n\t\tself.amb_correction = nn.Sequential(\n\t\t\tnn.Conv2d(3+3+3+3, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 32, 3, 1, 1),\n\t\t\tnn.PReLU(),\n\t\t\tnn.Conv2d(32, 3, 3, 1, 1),\n\t\t\tnn.Tanh(),\n\t\t\tnn.AdaptiveAvgPool2d(1)\n\t\t\t)\n\n\tdef forward(self, x):\n\t\t# x[0]: img; x[1]: trans; x[2]: A\n\t\timg = x[0]\n\t\ttrans = x[1]\n\t\tamb = x[2]\n\n\t\timg = self.sub_mean(img)\n\n\t\tbatch_size, row, col = img.size(0), img.size(2), img.size(3)\n\t\tx = img\n\t\txt = trans\n\t\txa = amb\n\t\th = Variable(torch.zeros(batch_size, 16, row, col).cuda(img.get_device()))\n\t\tc = Variable(torch.zeros(batch_size, 16, row, col).cuda(img.get_device()))\n\n\t\tfor k in range(self.iteration):\n\t\t\tx = torch.cat((img, trans, amb, x, xt, xa), 1)\n\t\t\tx = self.conv0(x)\n\t\t\tx = torch.cat((x, h), 1)\n\t\t\ti = self.conv_i(x)\n\t\t\tf = self.conv_f(x)\n\t\t\tg = self.conv_g(x)\n\t\t\to = self.conv_o(x)\n\t\t\tc = f * c + i * g\n\t\t\th = o * torch.tanh(c)\n\t\t\tx = h\n\t\t\tx = F.relu(self.conv1(self.grid(x)))\n\n\t\t\tif k<self.iteration-1:\n\t\t\t\tx = self.sub_mean(x)\n\n\t\t\t\tdt = self.trans_correction(torch.cat((img, trans, x, xt), 1))\n\t\t\t\txt = xt+dt\n\t\t\t\tda = self.amb_correction(torch.cat((img, amb, x, xa), 1))\n\t\t\t\tda = da.expand_as(img)\n\t\t\t\txa = xa+da\n\t\t\t\n\t\treturn x\n\n\n\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.ConvTranspose2d",
"torch.Tensor",
"torch.cat",
"torch.nn.PReLU",
"torch.nn.ModuleDict",
"torch.nn.Conv2d",
"torch.eye",
"torch.zeros",
"torch.nn.Sigmoid",
"torch.nn.Tanh",
"numpy.ones",
"torch.tanh",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ste93ste/ChestXRayClassification | [
"8568580e2d9a6c178dd9c877a4c302b48854c17c"
] | [
"semi-supervised-pytorch-master/semi-supervised/models/dgm.py"
] | [
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.nn import init\n\nfrom .vae import VariationalAutoencoder\nfrom .vae import Encoder, Decoder\n\ndrop_rate = 0.2\n\nclass Classifier(nn.Module):\n def __init__(self, dims):\n \"\"\"\n Single hidden layer classifier\n with softmax output.\n \"\"\"\n super(Classifier, self).__init__()\n [x_dim, h_dim, y_dim, add] = dims\n self.add = add\n x_dim_new = 512 + self.add\n self.x_dim_new = x_dim_new\n #self.bnInput = nn.BatchNorm1d(225*225+self.add)\n self.conv1 = nn.Conv2d(in_channels=1,\n out_channels=32,\n kernel_size=5,\n stride=2)\n self.bnconv1 = nn.BatchNorm2d(32)\n self.dropconv1 = nn.Dropout2d(drop_rate)\n\n self.conv2 = nn.Conv2d(in_channels=32,\n out_channels=32,\n kernel_size=5,\n stride=2)\n self.bnconv2 = nn.BatchNorm2d(32)\n self.dropconv2 = nn.Dropout2d(drop_rate)\n\n self.conv3 = nn.Conv2d(in_channels=32,\n out_channels=32,\n kernel_size=5,\n stride=2)\n self.bnconv3 = nn.BatchNorm2d(32)\n self.dropconv3 = nn.Dropout2d(drop_rate)\n\n self.conv4 = nn.Conv2d(in_channels=32,\n out_channels=32,\n kernel_size=5,\n stride=2)\n self.bnconv4 = nn.BatchNorm2d(32)\n self.dropconv4 = nn.Dropout2d(drop_rate)\n\n self.conv5 = nn.Conv2d(in_channels=32,\n out_channels=32,\n kernel_size=5,\n stride=2)\n self.bnconv5 = nn.BatchNorm2d(32)\n self.dropconv5 = nn.Dropout2d(drop_rate)\n\n self.dense1 = nn.Linear(x_dim_new, 128)\n self.bn1 = nn.BatchNorm1d(128)\n self.drop1 = nn.Dropout(drop_rate)\n\n self.dense2 = nn.Linear(128, 32)\n self.bn2 = nn.BatchNorm1d(32)\n self.drop2 = nn.Dropout(drop_rate)\n\n self.logits = nn.Linear(32, y_dim)\n\n def forward(self, x):\n x = x.view(-1, 225*225 + self.add)\n #x = self.bnInput(x)\n addValues = x[:, x.shape[1] - self.add : x.shape[1]]\n x = x[:, 0: x.shape[1] - self.add]\n x = x.view(-1, 1, 225, 225)\n x = self.dropconv1(self.bnconv1(F.relu((self.conv1(x)))))\n x = self.dropconv2(self.bnconv2(F.relu((self.conv2(x)))))\n x = self.dropconv3(self.bnconv3(F.relu((self.conv3(x)))))\n x = self.dropconv4(self.bnconv4(F.relu((self.conv4(x)))))\n x = self.dropconv5(self.bnconv5(F.relu((self.conv5(x)))))\n\n x = x.view(-1, self.x_dim_new - self.add)\n\n if len(addValues.size()) != 0:\n x = torch.cat([x, addValues], dim=1) \n\n x = self.drop1(self.bn1(F.relu(self.dense1(x))))\n x = self.drop2(self.bn2(F.relu(self.dense2(x))))\n x = F.softmax(self.logits(x), dim=-1)\n return x\n\nclass DeepGenerativeModel(VariationalAutoencoder):\n def __init__(self, dims):\n \"\"\"\n M2 code replication from the paper\n 'Semi-Supervised Learning with Deep Generative Models'\n (Kingma 2014) in PyTorch.\n\n The \"Generative semi-supervised model\" is a probabilistic\n model that incorporates label information in both\n inference and generation.\n\n Initialise a new generative model\n :param dims: dimensions of x, y, z and hidden layers.\n \"\"\"\n [x_dim, self.y_dim, z_dim, h_dim] = dims\n super(DeepGenerativeModel, self).__init__([x_dim, z_dim, h_dim])\n\n self.encoder = Encoder([x_dim, h_dim, z_dim, self.y_dim])\n self.decoder = Decoder([z_dim + self.y_dim, list(reversed(h_dim)), x_dim])\n self.classifier = Classifier([x_dim, h_dim[0], self.y_dim, 0])\n\n for m in self.modules():\n if isinstance(m, nn.Linear):\n init.xavier_normal_(m.weight.data)\n if m.bias is not None:\n m.bias.data.zero_()\n\n def forward(self, x, y):\n # Add label and data and generate latent variable\n z, z_mu, z_log_var = self.encoder(torch.cat([x, y], dim=1))\n\n self.kl_divergence = self._kld(z, (z_mu, z_log_var))\n\n # Reconstruct 
data point from latent data and label\n x_mu = self.decoder(torch.cat([z, y], dim=1))\n\n return x_mu\n\n def classify(self, x):\n logits = self.classifier(x)\n return logits\n\n def sample(self, z, y):\n \"\"\"\n Samples from the Decoder to generate an x.\n :param z: latent normal variable\n :param y: label (one-hot encoded)\n :return: x\n \"\"\"\n y = y.float()\n x = self.decoder(torch.cat([z, y], dim=1))\n return x\n\n\nclass AuxiliaryDeepGenerativeModel(DeepGenerativeModel):\n def __init__(self, dims):\n \"\"\"\n Auxiliary Deep Generative Models [Maaløe 2016]\n code replication. The ADGM introduces an additional\n latent variable 'a', which enables the model to fit\n more complex variational distributions.\n\n :param dims: dimensions of x, y, z, a and hidden layers.\n \"\"\"\n [x_dim, y_dim, z_dim, a_dim, h_dim] = dims\n super(AuxiliaryDeepGenerativeModel, self).__init__([x_dim, y_dim, z_dim, h_dim])\n \n self.aux_encoder = Encoder([x_dim, h_dim, a_dim, 0])\n self.aux_decoder = Encoder([x_dim, h_dim, a_dim, z_dim + y_dim,])\n\n self.classifier = Classifier([x_dim, h_dim[0], y_dim, a_dim])\n\n self.encoder = Encoder([x_dim, h_dim, z_dim, a_dim + y_dim])\n self.decoder = Decoder([y_dim + z_dim, list(reversed(h_dim)), x_dim])\n\n def classify(self, x):\n # Auxiliary inference q(a|x)\n a, a_mu, a_log_var = self.aux_encoder(x)\n\n # Classification q(y|a,x)\n logits = self.classifier(torch.cat([x, a], dim=1))\n return logits\n\n def forward(self, x, y):\n \"\"\"\n Forward through the model\n :param x: features\n :param y: labels\n :return: reconstruction\n \"\"\"\n # Auxiliary inference q(a|x)\n q_a, q_a_mu, q_a_log_var = self.aux_encoder(x)\n\n # Latent inference q(z|a,y,x)\n z, z_mu, z_log_var = self.encoder(torch.cat([x, y, q_a], dim=1))\n\n # Generative p(x|z,y)\n x_mu, x_var = self.decoder(torch.cat([z, y], dim=1))\n\n # Generative p(a|z,y,x)\n p_a, p_a_mu, p_a_log_var = self.aux_decoder(torch.cat([x, y, z], dim=1))\n\n a_kl = self._kld(q_a, (q_a_mu, q_a_log_var), (p_a_mu, p_a_log_var))\n z_kl = self._kld(z, (z_mu, z_log_var))\n\n self.kl_divergence = a_kl + z_kl\n\n return x_mu, x_var, z\n\n"
] | [
[
"torch.nn.BatchNorm1d",
"torch.nn.Dropout",
"torch.nn.Dropout2d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.init.xavier_normal_",
"torch.nn.Linear",
"torch.nn.BatchNorm2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sephwalker321/DashAnomalyDetection | [
"8e3744d32e762cd6d4258b853ff6254b8b103113"
] | [
"codes/Misc.py"
] | [
"\"\"\"============================================================================\nModule of miscelaneous functions used throughout.\n\nContents:\n- Fitted Parameters Return\n- Useful\n- Time Functions\n- Window Functions\n- ErrorCalcs\n- String Key Search\n\nAuthor: Joseph Walker [email protected]\n============================================================================\"\"\"\nimport scipy\nimport numpy as np\nimport pandas as pd\nimport time\nimport os\n\nimport scipy\nfrom sklearn import decomposition\nfrom sklearn.preprocessing import StandardScaler\n\n#For manipulating datetime format\nfrom datetime import datetime\nfrom datetime import timedelta\n\n################################################################################################################\n# Functions:\n################################################################################################################\n\n#######################\n# Fitted Parameters Return\n#######################\n\ndef GetOptimzed(PCA_FittedParams, Type, StringSearch, Metric, Vars):\n\tPowList = IndexPattern(\"BF604\", Vars)\n\tTherList = IndexPattern(\"RP209\", Vars)\n\tGagList = IndexPattern(\"GA201\", Vars)\n\tif len(PowList) > 0 and len(TherList) > 0 and len(GagList) > 0:\n\t\tStringSearch = \"CGO\"\n\tif PCA_FittedParams[Type] == None:\n\t\tPCA_NPCA = PCA_FittedParams[\"None\"][\"NPCA\"]\n\t\tPCA_AlphaLim = PCA_FittedParams[\"None\"][\"AlphaLim\"]\n\t\tPCA_Thresh = PCA_FittedParams[\"None\"][\"Thresh\"]\n\telif StringSearch in PCA_FittedParams[Type].keys():\n\t\tPCA_NPCA = PCA_FittedParams[Type][StringSearch][Metric][\"NPCA\"]\n\t\tPCA_AlphaLim = PCA_FittedParams[Type][StringSearch][Metric][\"AlphaLim\"]\n\t\tPCA_Thresh = PCA_FittedParams[Type][StringSearch][Metric][\"Thresh\"]\n\telse:\n\t\tPCA_NPCA = PCA_FittedParams[\"None\"][\"NPCA\"]\n\t\tPCA_AlphaLim = PCA_FittedParams[\"None\"][\"AlphaLim\"]\n\t\tPCA_Thresh = PCA_FittedParams[\"None\"][\"Thresh\"]\n\treturn PCA_NPCA, PCA_AlphaLim, PCA_Thresh\n\n#######################\n# Useful\n#######################\n\t\t\ndef round_sig(x, sig=2):\n\t''' Rounds to sig fig.\n\tx: variable to be rounded\n\tsig: Num sig figs\n\tReturn\n\trounded: The rounded number\n\t'''\n\tif isinstance(x, (float, int)):\n\t\tif np.isfinite(x) and x != 0:\n\t\t\treturn round(x, sig-int(np.floor(np.log10(abs(x))))-1)\n\n\t\telse:\n\t\t\treturn x\n\telse:\n\t\t\treturn x\n\t\n#######################\n# Time Functions\n#######################\n\ndef unixTimeMillis(dt):\n\t''' Convert datetime to unix timestamp.\n\tdt : datetime format,\n\tReturn,\n\tUnix : The unix time stamp'''\n\treturn int(dt.timestamp())\n\t#return time.mktime(dt.timetuple())\n\ndef unixToDatetime(unix):\n\t''' Convert unix timestamp to datetime. 
\n\tunix: unix time format,\n\tReturn,\n\ttimestamp : The time stamp in datatime format'''\n\treturn pd.to_datetime(unix,unit='s')\n\ndef getMarksDates(start, end, dates, NPoints=5):\n\t''' Returns the marks for labeling.\n\tstart: First date in list,\n\tend: Last date in list,\n\tdates: List of all dates with plotted point,\n\tNPoints: How many marks to include,\n\tReturn,\n\tresult: The tick marks for the plots'''\n\tresult = {}\n\tNth = np.linspace(0,len(dates),NPoints,dtype = int, endpoint=True)\n\tNth[-1] -= 1\n\tfor i, date in enumerate(dates):\n\t\tif i in list(Nth):\n\t\t\t# Append value to dict\n\t\t\tresult[unixTimeMillis(date)] = str(date.strftime('%d/%m/%Y'))\n\treturn result\n\ndef getDateTimeFromString(string):\n\t''' Convert datetime string to datetime object using a list of formats to decode\n\tstring: datatime string\n\tReturn,\n\tt: datetime object.\n\t'''\n\tfmts = [\"%Y-%m-%d %H:%M\", \"%Y-%m-%d %H\", \"%Y-%m-%d\", \"%Y-%m-%d %H:%M:%S\"]\n\tfor fmt in fmts:\n\t\ttry:\n\t\t\tt = datetime.strptime(string, fmt)\n\t\t\tbreak\n\t\texcept ValueError as err:\n\t\t\tpass\n\treturn t\n\n#######################\n# Window Functions\n#######################\n\ndef WindowSize(Rate, RateUnit, Duration, NDuration):\n\t''' Return how many datepoints in data for a given hourly rate to get the required duration\n\tRate: The sample rate,\n\tRateUnit: The units of the rate\n\tDuration: string [second, minute, hour, day, week] duration multiple\n\tNDuration: N times the Duration eg. 2 * weeks\n\tReturn,\n\tN: Int number of datapoints to be sampled\n\t'''\n\tif Duration in [\"S\",\"second\", \"seconds\"]:\n\t\tdt = NDuration*timedelta(seconds=1)\n\tif Duration in [\"mins\",\"minute\", \"minutes\"]:\n\t\tdt = NDuration*timedelta(minutes=1)\n\tif Duration in [\"hours\",\"hour\"]:\n\t\tdt = NDuration*timedelta(hours=1)\n\tif Duration in [\"days\",\"day\"]:\n\t\tdt = NDuration*timedelta(days=1)\n\tif Duration in [\"weeks\",\"week\"]:\n\t\tdt = NDuration*timedelta(weeks=1)\n\tif RateUnit in [\"S\",\"second\", \"seconds\"]:\n\t\treturn int(dt / (timedelta(seconds=1)*Rate))\n\tif RateUnit in [\"mins\",\"minute\", \"minutes\"]:\n\t\treturn int(dt / (timedelta(minutes=1)*Rate))\n\tif RateUnit in [\"hours\",\"hour\"]:\n\t\treturn int(dt / (timedelta(hours=1)*Rate))\n\tif RateUnit in [\"days\",\"day\"]:\n\t\treturn int(dt / (timedelta(days=1)*Rate))\n\tif RateUnit in [\"weeks\",\"week\"]:\n\t\treturn int(dt / (timedelta(weeks=1)*Rate))\n\ndef CalcUnits(Rate, RateUnit):\n\t''' Return units into years\n\tRate: The sample rate,\n\tRateUnit: The units of the rate\n\tDuration: string [second, minute, hour, day, week] duration multiple\n\tReturn,\n\tunits: rate in years\n\t'''\n\tif RateUnit in [\"S\",\"second\", \"seconds\"]:\n\t\treturn ((3600*24*365)/Rate)\n\tif RateUnit in [\"mins\",\"minute\", \"minutes\"]:\n\t\treturn ((60*24*365)/Rate)\n\tif RateUnit in [\"hours\",\"hour\"]:\n\t\treturn ((1*24*365)/Rate)\n\tif RateUnit in [\"days\",\"day\"]:\n\t\treturn ((365)/Rate)\n\tif RateUnit in [\"week\",\"weeks\"]:\n\t\treturn ((52)/Rate)\n\ndef timedeltaOneUnit(Rate,RateUnit):\n\t''' Return a timedelta element of one sample\n\tRate: The sample rate,\n\tRateUnit: The units of the rate\n\tReturn,\n\ttimedelta: A timedelta of one sample\n\t'''\n\tif RateUnit in [\"S\",\"second\", \"seconds\"]:\n\t\treturn Rate*timedelta(seconds=1)\n\tif RateUnit in [\"mins\",\"minute\", \"minutes\"]:\n\t\treturn Rate*timedelta(minutes=1)\n\tif RateUnit in [\"hours\",\"hour\"]:\n\t\treturn Rate*timedelta(hours=1)\n\tif RateUnit in 
[\"days\",\"day\"]:\n\t\treturn Rate*timedelta(days=1)\n\tif RateUnit in [\"weeks\",\"week\"]:\n\t\treturn Rate*timedelta(weeks=1)\n\t\n#######################\n# ErrorCalcs\n#######################\n\ndef errCalculation(data, RateUnit, Mean=False):\n\t''' The error estimation on the raw data,\n\tdata: np.array of data\n\tRateUnit: The units of the rate,\n\tMean: 0 if return array, 1 if return mean to get scalar value.\n\tReturn,\n\terrs: np. array of estimated errors\n\t'''\n\t#Assuming data is sampled at rate of once per second. Typical of most.\n\t#How many measurments?\n\tif RateUnit in [\"S\",\"second\", \"seconds\"]:\n\t\tN = 1\n\tif RateUnit in [\"mins\",\"minute\", \"minutes\"]:\n\t\tN = 60\n\tif RateUnit in [\"hours\",\"hour\"]:\n\t\tN = 3600\n\tif RateUnit in [\"days\",\"day\"]:\n\t\tN = 3600*24\t\n\tif Mean == False:\n\t\treturn data[\"Error\"].values #/ np.sqrt(N)\n\tif Mean == True:\n\t\treturn np.nanmean(data[\"Error\"].values )#/ np.sqrt(N))\n\n#######################\n# String Key Search\n#######################\n\ndef IndexPattern(Key, List):\n\t''' Functions to pull out strings matching to \"key\" in list, List.\n\tKey: String key to search for,\n\tList: List to look through for strings with substring \"Key\",\n\tReturn,\n\tL: List of items in List with Key in strings'''\n\tif Key not in [None]:\n\t\treturn [s for s in List if Key in s]\n\telse:\n\t\treturn []\n\ndef PatternsList(StrSearch, Files):\n\t'''TODO\n\t'''\n\tStrSep = StrSearch.split(\",\")+StrSearch.split(\" ,\")+StrSearch.split(\", \")\n\tKeySelected=[]\n\tfor i in range(len(StrSep)):\n\t\tif StrSep[i] not in [\"\", \" \"]:\n\t\t\tKeySelected += IndexPattern(StrSep[i], Files)\n\tKeySelected = sorted(list(set(KeySelected)))\n\treturn KeySelected\n\t\t\t\nif __name__ == \"__main__\":\n\tprint(\"Run as module\") "
] | [
[
"pandas.to_datetime",
"numpy.nanmean",
"numpy.isfinite"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
hammadkhann/Towards-richer-representation-of-CLS-token-for-Neural-Reranking | [
"8989e9460ee9e7e78269712d536ba818a63b4fe4"
] | [
"ColBERT_mtl/colbert/modeling/colbert.py"
] | [
"import string\nimport torch\nimport torch.nn as nn\nimport numpy as np\n\nfrom transformers import BertPreTrainedModel, BertModel, BertTokenizerFast\nfrom colbert.parameters import DEVICE\n\n\nclass ColBERT(BertPreTrainedModel):\n def __init__(self, config, query_maxlen, doc_maxlen, mask_punctuation, dim=128, similarity_metric='cosine'):\n\n super(ColBERT, self).__init__(config)\n\n self.query_maxlen = query_maxlen\n self.doc_maxlen = doc_maxlen\n self.similarity_metric = similarity_metric\n self.dim = dim\n\n self.mask_punctuation = mask_punctuation\n self.skiplist = {}\n\n if self.mask_punctuation:\n self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')\n self.skiplist = {w: True\n for symbol in string.punctuation\n for w in [symbol, self.tokenizer.encode(symbol, add_special_tokens=False)[0]]}\n\n self.bert = BertModel(config)\n self.linear = nn.Linear(config.hidden_size, dim, bias=False)\n\n self.init_weights()\n\n def forward(self, Q, D, Q_mask=None, D_mask=None, cls_Q_mask=None, cls_D_mask=None, token_overlap=None):\n Q, q_input_ids = self.query(*Q)\n D, d_input_ids = self.doc(*D)\n token_overlap = self.tensor_intersect(q_input_ids, d_input_ids)\n return self.score(Q, D, Q_mask, D_mask, cls_Q_mask, cls_D_mask, token_overlap)\n\n @staticmethod\n def tensor_intersect(Q, D):\n D = D[1].cpu().detach().numpy()\n Q = Q[1].cpu().detach().numpy()\n return 10*(len(np.intersect1d(Q, D)) / np.count_nonzero(D))\n\n def query(self, input_ids, attention_mask):\n input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)\n Q = self.bert(input_ids, attention_mask=attention_mask)[0]\n Q = self.linear(Q)\n\n return torch.nn.functional.normalize(Q, p=2, dim=2), input_ids\n\n def doc(self, input_ids, attention_mask, keep_dims=True):\n input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)\n D = self.bert(input_ids, attention_mask=attention_mask)[0]\n D = self.linear(D)\n\n mask = torch.tensor(self.mask(input_ids), device=DEVICE).unsqueeze(2).float()\n D = D * mask\n\n D = torch.nn.functional.normalize(D, p=2, dim=2)\n\n if not keep_dims:\n D, mask = D.cpu().to(dtype=torch.float16), mask.cpu().bool().squeeze(-1)\n D = [d[mask[idx]] for idx, d in enumerate(D)]\n\n return D, input_ids\n\n def old_score(self, Q, D, Q_mask=None, D_mask=None):\n if self.similarity_metric == 'cosine':\n if Q_mask is not None:\n Q = Q[:, Q_mask, :]\n if D_mask is not None:\n D = D[:, D_mask, :]\n return (Q @ D.permute(0, 2, 1)).max(2).values.sum(1)\n\n assert self.similarity_metric == 'l2'\n return (-1.0 * ((Q.unsqueeze(2) - D.unsqueeze(1)) ** 2).sum(-1)).max(-1).values.sum(-1)\n\n def score(self, Q, D, Q_mask=None, D_mask=None, cls_Q_mask=None, cls_D_mask=None, token_overlap=None):\n if self.similarity_metric == 'cosine':\n if Q_mask is not None:\n Q_tok = Q[:, Q_mask, :]\n if D_mask is not None:\n D_tok = D[:, D_mask, :]\n if cls_Q_mask is not None:\n Q_cls = Q[:, cls_Q_mask, :]\n if cls_D_mask is not None:\n D_cls = D[:, cls_D_mask, :]\n\n score_token = (Q_tok @ D_tok.permute(0, 2, 1)).max(2).values.sum(1)\n score_cls = (Q_cls @ D_cls.permute(0, 2, 1)).max(2).values.sum(1)\n\n return score_token, score_cls\n\n assert self.similarity_metric == 'l2'\n return (-1.0 * ((Q.unsqueeze(2) - D.unsqueeze(1))**2).sum(-1)).max(-1).values.sum(-1)\n\n def mask(self, input_ids):\n mask = [[(x not in self.skiplist) and (x != 0) for x in d] for d in input_ids.cpu().tolist()]\n return mask\n"
] | [
[
"torch.nn.functional.normalize",
"torch.nn.Linear",
"numpy.intersect1d",
"numpy.count_nonzero"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
StefanTippelt/deep-dream-viz | [
"027ff9132e57900168c058e14e62a2842d7d0bee"
] | [
"src/image_optimize.py"
] | [
"########################################################################\n# Published under the MIT License. See the file LICENSE for details.\n#\n# Copyright 2016 by Magnus Erik Hvass Pedersen\n########################################################################\n\n# Code was adjusted to my own needs by removing unnecessary output of\n# intermediate steps and passing model and session as parameters to the\n# functions, Stefan Tippelt\n########################################################################\n\n# import math\nimport numpy as np\n# import random\nimport logging\n\nlogging.basicConfig(filename='logfile.log', level=logging.INFO)\n\n\n# Image manipulation.\nimport PIL.Image\nfrom scipy.ndimage.filters import gaussian_filter\n\n# Custom functions\nfrom utils import tiled_gradient, resize_image\n\n\ndef optimize_image(layer_tensor, image, model, session,\n num_iterations=10, step_size=3.0, tile_size=400):\n \"\"\"\n Use gradient ascent to optimize an image so it maximizes the\n mean value of the given layer_tensor.\n\n Parameters:\n layer_tensor: Reference to a tensor that will be maximized.\n image: Input image used as the starting point.\n num_iterations: Number of optimization iterations to perform.\n step_size: Scale for each step of the gradient ascent.\n tile_size: Size of the tiles when calculating the gradient.\n show_gradient: Plot the gradient in each iteration.\n \"\"\"\n # Copy the image so we don't overwrite the original image.\n img = image.copy()\n\n # logging.INFO('Processing image: ')\n\n # Use TensorFlow to get the mathematical function for the\n # gradient of the given layer-tensor with regard to the\n # input image. This may cause TensorFlow to add the same\n # math-expressions to the graph each time this function is called.\n # It may use a lot of RAM and could be moved outside the function.\n gradient = model.get_gradient(layer_tensor)\n\n for i in range(num_iterations):\n # Calculate the value of the gradient.\n # This tells us how to change the image so as to\n # maximize the mean of the given layer-tensor.\n grad = tiled_gradient(model=model, session=session, gradient=gradient, image=img)\n\n # Blur the gradient with different amounts and add\n # them together. The blur amount is also increased\n # during the optimization. This was found to give\n # nice, smooth images. You can try and change the formulas.\n # The blur-amount is called sigma (0=no blur, 1=low blur, etc.)\n # We could call gaussian_filter(grad, sigma=(sigma, sigma, 0.0))\n # which would not blur the colour-channel. 
This tends to\n # give psychadelic / pastel colours in the resulting images.\n # When the colour-channel is also blurred the colours of the\n # input image are mostly retained in the output image.\n sigma = (i * 4.0) / num_iterations + 0.5\n grad_smooth1 = gaussian_filter(grad, sigma=sigma)\n grad_smooth2 = gaussian_filter(grad, sigma=sigma * 2)\n grad_smooth3 = gaussian_filter(grad, sigma=sigma * 0.5)\n grad = (grad_smooth1 + grad_smooth2 + grad_smooth3)\n\n # Scale the step-size according to the gradient-values.\n # This may not be necessary because the tiled-gradient\n # is already normalized.\n step_size_scaled = step_size / (np.std(grad) + 1e-8)\n\n # Update the image by following the gradient.\n img += grad * step_size_scaled\n\n return img\n\n\ndef recursive_optimize(layer_tensor, image, model, session,\n num_repeats=4, rescale_factor=0.7, blend=0.2,\n num_iterations=10, step_size=3.0,\n tile_size=400):\n \"\"\"\n Recursively blur and downscale the input image.\n Each downscaled image is run through the optimize_image()\n function to amplify the patterns that the Inception model sees.\n\n Parameters:\n image: Input image used as the starting point.\n rescale_factor: Downscaling factor for the image.\n num_repeats: Number of times to downscale the image.\n blend: Factor for blending the original and processed images.\n\n Parameters passed to optimize_image():\n layer_tensor: Reference to a tensor that will be maximized.\n num_iterations: Number of optimization iterations to perform.\n step_size: Scale for each step of the gradient ascent.\n tile_size: Size of the tiles when calculating the gradient.\n \"\"\"\n # Do a recursive step?\n if num_repeats > 0:\n # Blur the input image to prevent artifacts when downscaling.\n # The blur amount is controlled by sigma. Note that the\n # colour-channel is not blurred as it would make the image gray.\n sigma = 0.5\n img_blur = gaussian_filter(image, sigma=(sigma, sigma, 0.0))\n\n # Downscale the image.\n img_downscaled = resize_image(image=img_blur,\n factor=rescale_factor)\n\n # Recursive call to this function.\n # Subtract one from num_repeats and use the downscaled image.\n img_result = recursive_optimize(layer_tensor=layer_tensor,\n image=img_downscaled,\n model=model,\n session=session,\n num_repeats=num_repeats - 1,\n rescale_factor=rescale_factor,\n blend=blend,\n num_iterations=num_iterations,\n step_size=step_size,\n tile_size=tile_size)\n\n # Upscale the resulting image back to its original size.\n img_upscaled = resize_image(image=img_result, size=image.shape)\n\n # Blend the original and processed images.\n image = blend * image + (1.0 - blend) * img_upscaled\n\n # logging.INFO('Recursive level: ', num_repeats)\n\n # Process the image using the DeepDream algorithm.\n img_result = optimize_image(layer_tensor=layer_tensor,\n image=image,\n model=model,\n session=session,\n num_iterations=num_iterations,\n step_size=step_size,\n tile_size=tile_size)\n\n return img_result\n"
] | [
[
"numpy.std",
"scipy.ndimage.filters.gaussian_filter"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"0.15",
"1.4",
"0.16",
"1.0",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"0.10",
"0.17",
"1.3"
],
"tensorflow": []
}
] |
beyondacm/carefree-learn | [
"a9c69141163c04a16aba8317febe7a66218510b6"
] | [
"cflearn/pipeline.py"
] | [
"import os\nimport json\nimport torch\nimport shutil\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.patches as mpatch\n\nfrom typing import *\nfrom tqdm import tqdm\nfrom cfdata.tabular import TabularDataset\nfrom cftool.ml import ModelPattern\nfrom cftool.misc import show_or_save\nfrom cftool.misc import shallow_copy_dict\nfrom cftool.misc import lock_manager\nfrom cftool.misc import timing_context\nfrom cftool.misc import Saving\nfrom cftool.misc import LoggingMixin\n\ntry:\n amp: Optional[Any] = torch.cuda.amp\nexcept:\n amp = None\n\nfrom .types import data_type\nfrom .configs import Elements\nfrom .configs import Environment\nfrom .trainer import Trainer\nfrom .trainer import IntermediateResults\nfrom .protocol import DataProtocol\nfrom .protocol import PrefetchLoader\nfrom .protocol import DataLoaderProtocol\nfrom .inference import Inference\nfrom .inference import PreProcessor\nfrom .misc._api import _fetch_saving_paths\nfrom .misc.toolkit import to_2d\nfrom .misc.toolkit import to_relative\nfrom .misc.toolkit import eval_context\nfrom .misc.toolkit import LoggingMixinWithRank\nfrom .misc.time_series import TSLabelCollator\nfrom .models.base import model_dict\nfrom .models.base import ModelBase\nfrom .models.base import PipeConfig\n\n\nkey_type = Tuple[Union[str, Optional[str]], ...]\n\n\nclass Pipeline(LoggingMixinWithRank):\n config_bundle_name = \"config_bundle\"\n\n def __init__(self, environment: Environment):\n # typing\n self.tr_data: DataProtocol\n self.cv_data: Optional[DataProtocol]\n self.tr_loader: DataLoaderProtocol\n self.tr_loader_copy: DataLoaderProtocol\n self.cv_loader: Optional[DataLoaderProtocol]\n # common\n self.environment = environment\n self.device = environment.device\n self.model: Optional[ModelBase] = None\n self.inference: Optional[Inference]\n LoggingMixin.reset_logging()\n self.config = environment.pipeline_config\n self.model_type = environment.model\n self.timing = self.config.setdefault(\"use_timing_context\", True)\n self.data_config[\"use_timing_context\"] = self.timing\n self.data_config[\"default_categorical_process\"] = \"identical\"\n self.sampler_config = self.config.setdefault(\"sampler_config\", {})\n\n def __getattr__(self, item: str) -> Any:\n return self.environment.config.get(item)\n\n def __str__(self) -> str:\n return f\"{type(self.model).__name__}()\" # type: ignore\n\n __repr__ = __str__\n\n @property\n def data(self) -> DataProtocol:\n return self._original_data\n\n @property\n def train_set(self) -> TabularDataset:\n raw = self.tr_data.raw\n return TabularDataset(*raw.xy, task_type=self.tr_data.task_type)\n\n @property\n def valid_set(self) -> Optional[TabularDataset]:\n if self.cv_data is None:\n return None\n raw = self.cv_data.raw\n return TabularDataset(*raw.xy, task_type=self.cv_data.task_type)\n\n @property\n def int_cv_split(self) -> int:\n if isinstance(self.cv_split, int):\n return self.cv_split\n num_data = len(self._original_data)\n if self.cv_split is not None:\n return int(round(self.cv_split * num_data))\n default_cv_split = 0.1\n cv_split_num = int(round(default_cv_split * num_data))\n cv_split_num = max(self.min_cv_split, cv_split_num)\n max_cv_split = int(round(num_data * self.max_cv_split_ratio))\n max_cv_split = min(self.max_cv_split, max_cv_split)\n return min(cv_split_num, max_cv_split)\n\n @property\n def binary_threshold(self) -> Optional[float]:\n if self.inference is None:\n raise ValueError(\"`inference` is not yet generated\")\n return self.inference.binary_threshold\n\n 
@property\n def user_config(self) -> Dict[str, Any]:\n return shallow_copy_dict(self.environment.user_config)\n\n @property\n def user_inc_config(self) -> Dict[str, Any]:\n return shallow_copy_dict(self.environment.user_increment_config)\n\n def _init_data(self) -> None:\n if not self.data.is_ts:\n self.ts_label_collator = None\n else:\n self.ts_label_collator = TSLabelCollator(\n self.data,\n self.ts_label_collator_config,\n )\n self.sampler_config.setdefault(\"verbose_level\", self.data._verbose_level)\n self.preprocessor = PreProcessor(\n self._original_data,\n self.loader_protocol,\n self.sampler_protocol,\n self.sampler_config,\n )\n tr_sampler = self.preprocessor.make_sampler(\n self.tr_data,\n self.shuffle_tr,\n self.tr_weights,\n )\n self.tr_loader = DataLoaderProtocol.make(\n self.loader_protocol,\n self.batch_size,\n tr_sampler,\n return_indices=True,\n verbose_level=self._verbose_level,\n label_collator=self.ts_label_collator,\n )\n if self.cv_data is None:\n self.cv_loader = None\n else:\n cv_sampler = self.preprocessor.make_sampler(self.cv_data, False)\n self.cv_loader = DataLoaderProtocol.make(\n self.loader_protocol,\n self.cv_batch_size,\n cv_sampler,\n return_indices=True,\n verbose_level=self._verbose_level,\n label_collator=self.ts_label_collator,\n )\n self.cv_loader.enabled_sampling = False\n # tr loader copy\n self.tr_loader_copy = self.tr_loader.copy()\n self.tr_loader_copy.enabled_sampling = False\n self.tr_loader_copy.sampler.shuffle = False\n\n def _prepare_modules(\n self,\n *,\n is_loading: bool = False,\n loaded_registered_pipes: Optional[Dict[str, PipeConfig]] = None,\n ) -> None:\n # logging\n if not is_loading:\n if os.path.isdir(self.logging_folder):\n if os.listdir(self.logging_folder):\n print(\n f\"{self.warning_prefix}'{self.logging_folder}' already exists, \"\n \"it will be cleared up to store our logging\"\n )\n shutil.rmtree(self.logging_folder)\n os.makedirs(self.logging_folder)\n self._init_logging(self.verbose_level, self.trigger_logging)\n # model\n with timing_context(self, \"init model\", enable=self.timing):\n self.model = model_dict[self.model_type](\n self.environment,\n self.tr_loader_copy,\n self.cv_loader,\n self.tr_weights,\n self.cv_weights,\n loaded_registered_pipes,\n )\n self.model.init_ema()\n # trainer\n with timing_context(self, \"init trainer\", enable=self.timing):\n if self.preprocessor is None:\n msg = \"`preprocessor` is not defined. 
Please call `_init_data` first\"\n raise ValueError(msg)\n self.inference = Inference(\n self.preprocessor,\n model=self.model,\n binary_config=self.binary_config,\n use_binary_threshold=self.use_binary_threshold,\n use_tqdm=self.use_tqdm,\n )\n self.trainer = Trainer(\n self.model,\n self.inference,\n self.environment,\n is_loading,\n )\n # to device\n with timing_context(self, \"init device\", enable=self.timing):\n self.model.to(self.device)\n\n def _before_loop(\n self,\n x: data_type,\n y: data_type,\n x_cv: data_type,\n y_cv: data_type,\n sample_weights: np.ndarray,\n ) -> None:\n # data\n y, y_cv = map(to_2d, [y, y_cv])\n args = (x, y) if y is not None else (x,)\n self.data_config[\"verbose_level\"] = self._verbose_level\n if sample_weights is None:\n self.sample_weights = None\n else:\n self.sample_weights = sample_weights.copy()\n self._original_data = DataProtocol.make(self.data_protocol, **self.data_config)\n self._original_data.read(*args, **self.read_config)\n self.tr_data = self._original_data\n self._save_original_data = x_cv is None\n self.tr_weights = self.cv_weights = None\n if x_cv is not None:\n self.cv_data = self.tr_data.copy_to(x_cv, y_cv)\n if sample_weights is not None:\n self.tr_weights = sample_weights[: len(self.tr_data)]\n self.cv_weights = sample_weights[len(self.tr_data) :]\n else:\n if self.int_cv_split <= 0:\n self.cv_data = None\n self.tr_split_indices = None\n self.cv_split_indices = None\n if sample_weights is not None:\n self.tr_weights = sample_weights\n else:\n split = self.tr_data.split(self.int_cv_split, order=self.cv_split_order)\n self.tr_data, self.cv_data = split.remained, split.split\n self.tr_split_indices = split.remained_indices\n self.cv_split_indices = split.split_indices\n if sample_weights is not None:\n self.tr_weights = sample_weights[split.remained_indices]\n self.cv_weights = sample_weights[split.split_indices]\n # deep speed\n self.set_rank_0(self.environment.is_rank_0)\n # data\n self._init_data()\n # modules\n self._prepare_modules()\n # deep speed\n self.set_rank_0(self.is_rank_0)\n\n def _handle_pretrain(\n self,\n strict: bool,\n folder: Optional[str],\n identifier: Optional[str],\n state_dict_callback: Optional[Callable[[Dict[str, Any]], None]],\n ) -> None:\n if identifier is None:\n return None\n paths_dict = _fetch_saving_paths(identifier, folder)\n all_paths: List[str] = sum(paths_dict.values(), [])\n if len(all_paths) > 1:\n raise ValueError(\"more than 1 model is detected\")\n path = all_paths[0]\n folder = folder or \"./\"\n compress = os.path.isfile(f\"{path}.zip\")\n with lock_manager(folder, [path]):\n with Saving.compress_loader(path, compress):\n self.trainer.restore_checkpoint(path, strict, state_dict_callback)\n\n def _loop(self) -> None:\n # dump information\n logging_folder = self.logging_folder\n os.makedirs(logging_folder, exist_ok=True)\n if self.is_rank_0:\n if self.environment.deepspeed:\n logging_folder = os.path.join(logging_folder, os.pardir)\n Saving.save_dict(self.config, \"__config__\", logging_folder)\n with open(os.path.join(logging_folder, \"__model__.txt\"), \"w\") as f:\n f.write(str(self.model))\n # training loop\n self.trainer.fit(\n self.tr_loader,\n self.tr_loader_copy,\n self.cv_loader,\n self.tr_weights,\n self.cv_weights,\n )\n self.log_timing()\n\n @staticmethod\n def _rectangle(\n ax: Any,\n x: float,\n y: float,\n width: float,\n height: float,\n color: Any,\n text: str,\n ) -> Tuple[float, float]:\n rectangle = mpatch.Rectangle(\n (x, y),\n width,\n height,\n color=color,\n 
alpha=0.8,\n ec=\"#000000\",\n )\n ax.add_artist(rectangle)\n rx, ry = rectangle.get_xy()\n cx = rx + 0.5 * rectangle.get_width()\n cy = ry + 0.5 * rectangle.get_height()\n ax.annotate(\n text,\n (cx, cy),\n color=\"black\",\n fontsize=16,\n ha=\"center\",\n va=\"center\",\n )\n return cx, cy\n\n @staticmethod\n def _arrow(\n ax: Any,\n lx: float,\n ly: float,\n rx: float,\n ry: float,\n half_box_width: float,\n ) -> None:\n lx += half_box_width\n rx -= half_box_width\n ax.annotate(\n text=\"\",\n xy=(rx, ry),\n xytext=(lx, ly),\n xycoords=\"data\",\n arrowprops=dict(arrowstyle=\"->\", color=\"black\"),\n )\n\n @staticmethod\n def _box_msg(key: key_type, delim: str) -> str:\n scope, meta, config = key[1:]\n if meta is None:\n scope_str = \"\"\n extractor_str = f\"{scope}_{config}\"\n else:\n scope_str = f\"\\n{scope}\"\n extractor_str = f\"{meta}_{config}\"\n return f\"Extractor\\n{delim}\\n{extractor_str}{scope_str}\"\n\n @staticmethod\n def _rectangles(\n ax: Any,\n color: Any,\n x: float,\n y_max: float,\n box_width: float,\n box_height: float,\n delim: str,\n keys: List[key_type],\n positions: Dict[key_type, Tuple[float, float]],\n ) -> None:\n for i, key in enumerate(keys):\n y = (i + 0.5) * (y_max / len(keys))\n args = ax, x, y, box_width, box_height, color\n cx, cy = Pipeline._rectangle(*args, Pipeline._box_msg(key, delim))\n positions[key] = cx, cy\n\n # api\n\n def draw(\n self,\n export_path: Optional[str] = None,\n *,\n transparent: bool = True,\n ) -> \"Pipeline\":\n pipes = model_dict[self.model_type].registered_pipes\n if pipes is None:\n raise ValueError(\"pipes have not yet been registered\")\n transforms_mapping: Dict[str, str] = {}\n extractors_mapping: Dict[str, key_type] = {}\n heads_mapping: Dict[str, key_type] = {}\n sorted_keys = sorted(pipes)\n for key in sorted_keys:\n pipe_cfg = pipes[key]\n transforms_mapping[key] = pipe_cfg.transform\n extractor_key: key_type = (\n pipe_cfg.transform,\n pipe_cfg.extractor,\n pipe_cfg.extractor_meta_scope,\n pipe_cfg.extractor_config,\n )\n if not pipe_cfg.reuse_extractor:\n cursor = 0\n new_extractor_key: key_type = extractor_key\n while new_extractor_key in extractors_mapping.values():\n cursor += 1\n new_extractor_key = extractor_key + (str(cursor),)\n extractor_key = new_extractor_key\n extractors_mapping[key] = extractor_key\n heads_mapping[key] = (\n key,\n pipe_cfg.head,\n pipe_cfg.head_meta_scope,\n pipe_cfg.head_config,\n )\n unique_transforms = sorted(set(transforms_mapping.values()))\n unique_extractors = sorted(set(extractors_mapping.values()))\n all_heads = sorted(heads_mapping.values())\n\n box_width = 0.5\n box_height = 0.4\n half_box_width = 0.5 * box_width\n x_scale, y_scale = 6, 5\n x_positions = [0, 0.75, 1.5, 2.25]\n y_gap = box_height * 2.5\n x_min, x_max = x_positions[0], x_positions[-1]\n x_diff = x_max - x_min\n nodes_list = [unique_transforms, unique_extractors, all_heads]\n y_max = float(max(map(len, nodes_list))) * y_gap # type: ignore\n y_max = max(1.5, y_max)\n fig = plt.figure(dpi=100, figsize=[(x_diff + 2.0) * x_scale, y_max * y_scale])\n\n ax = fig.add_subplot(111)\n if transparent:\n fig.patch.set_alpha(0)\n ax.patch.set_alpha(0)\n ax.tick_params(labelbottom=False, bottom=False)\n ax.tick_params(labelleft=False, left=False)\n ax.spines[\"right\"].set_visible(False)\n ax.spines[\"top\"].set_visible(False)\n ax.spines[\"left\"].set_visible(False)\n ax.spines[\"bottom\"].set_visible(False)\n colors = plt.cm.Paired([i / 6 for i in range(4)])\n\n # rectangle\n delim = \"-\" * 16\n color = colors[0]\n x 
= x_positions[0]\n transform_positions = {}\n extractor_positions: Dict[key_type, Tuple[float, float]] = {}\n head_positions: Dict[key_type, Tuple[float, float]] = {}\n for i, transform in enumerate(unique_transforms):\n y = (i + 0.5) * (y_max / len(unique_transforms))\n args = ax, x, y, box_width, box_height, color\n cx, cy = self._rectangle(*args, f\"Transform\\n{delim}\\n{transform}\")\n transform_positions[transform] = cx, cy\n color = colors[1]\n x = x_positions[1]\n self._rectangles(\n ax,\n color,\n x,\n y_max,\n box_width,\n box_height,\n delim,\n unique_extractors,\n extractor_positions,\n )\n color = colors[2]\n x = x_positions[2]\n self._rectangles(\n ax,\n color,\n x,\n y_max,\n box_width,\n box_height,\n delim,\n all_heads,\n head_positions,\n )\n color = colors[3]\n x, y = x_positions[3], 0.5 * y_max\n args = ax, x, y, box_width, box_height, color\n aggregator = self.environment.model_config[\"aggregator\"]\n cx, cy = self._rectangle(*args, f\"Aggregator\\n{delim}\\n{aggregator}\")\n aggregator_position = cx, cy\n\n # arrows\n for key in sorted_keys:\n transform = transforms_mapping[key]\n extractor = extractors_mapping[key]\n head_tuple = heads_mapping[key]\n x1, y1 = transform_positions[transform]\n x2, y2 = extractor_positions[extractor]\n x3, y3 = head_positions[head_tuple]\n self._arrow(ax, x1, y1, x2, y2, half_box_width)\n self._arrow(ax, x2, y2, x3, y3, half_box_width)\n self._arrow(ax, x3, y3, *aggregator_position, half_box_width)\n\n ax.set_xlim(x_min, x_max + box_width + 0.1)\n ax.set_ylim(0, y_max + box_height)\n show_or_save(export_path, fig)\n return self\n\n def fit(\n self,\n x: data_type,\n y: data_type = None,\n x_cv: data_type = None,\n y_cv: data_type = None,\n *,\n pretrain_strict: bool = False,\n pretrain_folder: Optional[str] = None,\n pretrain_identifier: Optional[str] = None,\n state_dict_callback: Optional[Callable[[Dict[str, Any]], None]] = None,\n sample_weights: Optional[np.ndarray] = None,\n ) -> \"Pipeline\":\n self._before_loop(x, y, x_cv, y_cv, sample_weights)\n self._handle_pretrain(\n pretrain_strict,\n pretrain_folder,\n pretrain_identifier,\n state_dict_callback,\n )\n self._loop()\n # finalize mlflow\n run_id = self.trainer.run_id\n mlflow_client = self.trainer.mlflow_client\n if mlflow_client is not None:\n # log model\n if self.production is not None:\n import mlflow\n import cflearn\n\n cwd = os.getcwd()\n root_folder = os.path.join(os.path.dirname(__file__), os.pardir)\n conda_env = os.path.join(os.path.abspath(root_folder), \"conda.yml\")\n if self.production == \"pack\":\n pack_folder = os.path.join(self.logging_folder, \"__packed__\")\n pack_folder = to_relative(os.path.abspath(pack_folder), cwd)\n cflearn.Pack.pack(self, pack_folder, compress=False, verbose=False)\n mlflow.pyfunc.save_model(\n os.path.join(self.logging_folder, \"__pyfunc__\"),\n python_model=cflearn.PackModel(),\n artifacts={\"pack_folder\": pack_folder},\n conda_env=conda_env,\n )\n cflearn._rmtree(pack_folder)\n elif self.production == \"pipeline\":\n export_folder = os.path.join(self.logging_folder, \"pipeline\")\n export_folder = to_relative(os.path.abspath(export_folder), cwd)\n self.save(export_folder, compress=False)\n mlflow.pyfunc.save_model(\n os.path.join(self.logging_folder, \"__pyfunc__\"),\n python_model=cflearn.PipelineModel(),\n artifacts={\"export_folder\": export_folder},\n conda_env=conda_env,\n )\n else:\n msg = f\"unrecognized production type '{self.production}' found\"\n raise NotImplementedError(msg)\n # log artifacts\n if 
self.environment.log_pipeline_to_artifacts:\n if self.production != \"pipeline\":\n self.save(os.path.join(self.logging_folder, \"pipeline\"))\n self.trainer._log_artifacts()\n # terminate\n mlflow_client.set_terminated(run_id)\n return self\n\n def predict(\n self,\n x: data_type,\n *,\n return_all: bool = False,\n contains_labels: bool = False,\n requires_recover: bool = True,\n returns_probabilities: bool = False,\n **kwargs: Any,\n ) -> Union[np.ndarray, Dict[str, np.ndarray]]:\n if self.inference is None:\n raise ValueError(\"`inference` is not yet generated\")\n loader = self.preprocessor.make_inference_loader(\n x,\n self.device,\n self.cv_batch_size,\n is_onnx=self.inference.onnx is not None,\n contains_labels=contains_labels,\n )\n kwargs = shallow_copy_dict(kwargs)\n kwargs.update(\n {\n \"return_all\": return_all,\n \"requires_recover\": requires_recover,\n \"returns_probabilities\": returns_probabilities,\n }\n )\n\n if self.inference is None:\n raise ValueError(\"`inference` is not yet generated\")\n return self.inference.predict(loader, **shallow_copy_dict(kwargs))\n\n def predict_prob(\n self,\n x: data_type,\n *,\n return_all: bool = False,\n contains_labels: bool = False,\n **kwargs: Any,\n ) -> Union[np.ndarray, Dict[str, np.ndarray]]:\n if self.data.is_reg:\n raise ValueError(\"`predict_prob` should not be called on regression tasks\")\n return self.predict(\n x,\n return_all=return_all,\n contains_labels=contains_labels,\n returns_probabilities=True,\n **shallow_copy_dict(kwargs),\n )\n\n def to_pattern(\n self,\n *,\n pre_process: Optional[Callable] = None,\n **predict_kwargs: Any,\n ) -> ModelPattern:\n def _predict(x: np.ndarray) -> np.ndarray:\n if pre_process is not None:\n x = pre_process(x)\n return self.predict(x, **predict_kwargs)\n\n def _predict_prob(x: np.ndarray) -> np.ndarray:\n if pre_process is not None:\n x = pre_process(x)\n return self.predict_prob(x, **predict_kwargs)\n\n return ModelPattern(\n init_method=lambda: self,\n predict_method=_predict,\n predict_prob_method=_predict_prob,\n )\n\n data_folder = \"data\"\n train_folder = \"train\"\n valid_folder = \"valid\"\n original_folder = \"original\"\n train_indices_file = \"train_indices.npy\"\n valid_indices_file = \"valid_indices.npy\"\n sample_weights_file = \"sample_weights.npy\"\n final_results_file = \"final_results.json\"\n registered_pipes_file = \"registered_pipes.json\"\n\n @classmethod\n def make(\n cls,\n config: Dict[str, Any],\n increment_config: Dict[str, Any],\n ) -> \"Pipeline\":\n return cls(Environment.from_elements(Elements.make(config, increment_config)))\n\n def save(\n self,\n export_folder: Optional[str] = None,\n *,\n compress: bool = True,\n remove_original: bool = True,\n ) -> \"Pipeline\":\n if export_folder is None:\n export_folder = self.trainer.checkpoint_folder\n abs_folder = os.path.abspath(export_folder)\n base_folder = os.path.dirname(abs_folder)\n with lock_manager(base_folder, [export_folder]):\n # data\n data_folder = os.path.join(export_folder, self.data_folder)\n os.makedirs(data_folder, exist_ok=True)\n if self.sample_weights is not None:\n sw_file = os.path.join(data_folder, self.sample_weights_file)\n np.save(sw_file, self.sample_weights)\n if not self._save_original_data:\n assert self.cv_data is not None\n train_data_folder = os.path.join(data_folder, self.train_folder)\n valid_data_folder = os.path.join(data_folder, self.valid_folder)\n self.tr_data.save(train_data_folder, compress=False)\n self.cv_data.save(valid_data_folder, compress=False)\n else:\n 
original_data_folder = os.path.join(data_folder, self.original_folder)\n self._original_data.save(original_data_folder, compress=False)\n if self.tr_split_indices is not None:\n tr_file = os.path.join(data_folder, self.train_indices_file)\n np.save(tr_file, self.tr_split_indices)\n if self.cv_split_indices is not None:\n cv_file = os.path.join(data_folder, self.valid_indices_file)\n np.save(cv_file, self.cv_split_indices)\n # registered pipes\n if self.model is None:\n raise ValueError(\"`model` is not yet generated\")\n pipes = self.model.registered_pipes\n pipes_path = os.path.join(export_folder, self.registered_pipes_file)\n with open(pipes_path, \"w\") as f:\n json.dump(pipes, f)\n # final results\n final_results = self.trainer.final_results\n if final_results is None:\n raise ValueError(\"`final_results` are not generated yet\")\n with open(os.path.join(export_folder, self.final_results_file), \"w\") as f:\n json.dump(final_results, f)\n # pytorch checkpoint\n score = final_results.final_score\n self.trainer.save_checkpoint(score, export_folder)\n # misc config bundle\n if self.inference is None:\n raise ValueError(\"`inference` is not yet generated\")\n config_bundle = {\n \"config\": shallow_copy_dict(self.user_config),\n \"increment_config\": shallow_copy_dict(self.user_inc_config),\n \"binary_config\": self.inference.binary_config,\n }\n Saving.save_dict(config_bundle, self.config_bundle_name, export_folder)\n # compress\n if compress:\n Saving.compress(abs_folder, remove_original=remove_original)\n return self\n\n @classmethod\n def load(cls, export_folder: str, *, compress: bool = True) -> \"Pipeline\":\n base_folder = os.path.dirname(os.path.abspath(export_folder))\n with lock_manager(base_folder, [export_folder]):\n with Saving.compress_loader(export_folder, compress):\n # misc config bundle\n config_bundle = Saving.load_dict(cls.config_bundle_name, export_folder)\n user_config = config_bundle[\"config\"]\n user_increment_config = config_bundle[\"increment_config\"]\n user_increment_config[\"binary_config\"] = config_bundle[\"binary_config\"]\n user_increment_config[\"verbose_level\"] = 0\n pipeline = cls.make(user_config, user_increment_config)\n # sample weights\n data_folder = os.path.join(export_folder, cls.data_folder)\n tr_weights = cv_weights = sample_weights = None\n sw_file = os.path.join(data_folder, cls.sample_weights_file)\n if os.path.isfile(sw_file):\n sample_weights = np.load(sw_file)\n # data\n cv_data: Optional[DataProtocol]\n data_base = DataProtocol.get(pipeline.data_protocol)\n original_data_folder = os.path.join(data_folder, cls.original_folder)\n if not os.path.isdir(original_data_folder):\n train_data_folder = os.path.join(data_folder, cls.train_folder)\n valid_data_folder = os.path.join(data_folder, cls.valid_folder)\n try:\n tr_data = data_base.load(train_data_folder, compress=False)\n cv_data = data_base.load(valid_data_folder, compress=False)\n except Exception as e:\n raise ValueError(\n f\"data information is corrupted ({e}), \"\n \"this may cause by backward compatible breaking\"\n )\n original_data = tr_data\n if sample_weights is not None:\n tr_weights = sample_weights[: len(tr_data)]\n cv_weights = sample_weights[len(tr_data) :]\n else:\n original_data = data_base.load(\n original_data_folder,\n compress=False,\n )\n vi_file = os.path.join(data_folder, cls.valid_indices_file)\n if not os.path.isfile(vi_file):\n tr_weights = sample_weights\n tr_data = original_data\n cv_data = None\n else:\n ti_file = os.path.join(data_folder, 
cls.train_indices_file)\n train_indices, valid_indices = map(np.load, [ti_file, vi_file])\n split = original_data.split_with_indices(\n valid_indices, train_indices\n )\n tr_data, cv_data = split.remained, split.split\n if sample_weights is not None:\n tr_weights = sample_weights[train_indices]\n cv_weights = sample_weights[valid_indices]\n pipeline.sample_weights = sample_weights\n pipeline.tr_weights = tr_weights\n pipeline.cv_weights = cv_weights\n pipeline._original_data = original_data\n pipeline.tr_data = tr_data\n pipeline.cv_data = cv_data\n pipeline._init_data()\n # registered pipes\n pipes_path = os.path.join(export_folder, cls.registered_pipes_file)\n if not os.path.isfile(pipes_path):\n pipes = None\n else:\n with open(pipes_path, \"r\") as f:\n pipes = {k: PipeConfig(*v) for k, v in json.load(f).items()}\n # prepare modules\n pipeline._prepare_modules(\n is_loading=True,\n loaded_registered_pipes=pipes,\n )\n trainer = pipeline.trainer\n trainer.state.inject_loader(pipeline.tr_loader)\n trainer.tr_loader = PrefetchLoader(pipeline.tr_loader, pipeline.device)\n cv_loader = pipeline.cv_loader\n if cv_loader is None:\n trainer.cv_loader = None\n else:\n trainer.cv_loader = PrefetchLoader(cv_loader, pipeline.device)\n # pytorch checkpoint\n trainer.restore_checkpoint(export_folder)\n # final results\n trainer._init_metrics()\n final_results_path = os.path.join(export_folder, cls.final_results_file)\n with open(final_results_path, \"r\") as f:\n trainer.final_results = IntermediateResults(*json.load(f))\n return pipeline\n\n def profile_forward(self, *, num_repeat: int = 100, **kwargs: Any) -> None:\n if self.model is None:\n raise ValueError(\"`model` is not generated yet\")\n input_sample = {k: v.to(\"cpu\") for k, v in self.model.input_sample.items()}\n self.model.to(\"cpu\")\n with eval_context(self.model):\n with torch.autograd.profiler.profile(**kwargs) as prof:\n for _ in tqdm(range(num_repeat), total=num_repeat):\n self.model(input_sample)\n print(prof.key_averages().table(sort_by=\"self_cpu_time_total\"))\n self.model.to(self.device)\n\n\n__all__ = [\"Pipeline\"]\n"
] | [
[
"matplotlib.patches.Rectangle",
"numpy.save",
"numpy.load",
"torch.autograd.profiler.profile",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
johnjasa/floris | [
"078fe256463044bebca5ada18c7a9db7a1031b12"
] | [
"floris/tools/optimization/scipy/yaw_wind_rose_parallel.py"
] | [
"# Copyright 2020 NREL\n\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not\n# use this file except in compliance with the License. You may obtain a copy of\n# the License at http://www.apache.org/licenses/LICENSE-2.0\n\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations under\n# the License.\n\n# See https://floris.readthedocs.io for documentation\n\nfrom itertools import repeat\n\nimport numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\n\nfrom floris.tools.optimization.scipy.yaw_wind_rose import YawOptimizationWindRose\n\nfrom ....logging_manager import LoggerBase\n\n\nclass YawOptimizationWindRoseParallel(YawOptimizationWindRose, LoggerBase):\n \"\"\"\n YawOptimizationWindRose is a subclass of\n :py:class:`~.tools.optimizationscipy.YawOptimizationWindRose` that is used\n to perform parallel computing to optimize the yaw angles of all turbines in\n a Floris Farm for multiple sets of inflow conditions (combinations of wind\n speed, wind direction, and optionally turbulence intensity) using the scipy\n optimize package. Parallel optimization is performed using the\n MPIPoolExecutor method of the mpi4py.futures module.\n \"\"\"\n\n def __init__(\n self,\n fi,\n wd,\n ws,\n ti=None,\n minimum_yaw_angle=0.0,\n maximum_yaw_angle=25.0,\n minimum_ws=3.0,\n maximum_ws=25.0,\n x0=None,\n bnds=None,\n opt_method=\"SLSQP\",\n opt_options=None,\n include_unc=False,\n unc_pmfs=None,\n unc_options=None,\n ):\n \"\"\"\n Instantiate YawOptimizationWindRoseParallel object with a\n FlorisInterface object and assign parameter values.\n\n Args:\n fi (:py:class:`~.tools.floris_interface.FlorisInterface`):\n Interface used to interact with the Floris object.\n wd (iterable) : The wind directions for which the yaw angles are\n optimized (deg).\n ws (iterable): The wind speeds for which the yaw angles are\n optimized (m/s).\n ti (iterable, optional): An optional list of turbulence intensity\n values for which the yaw angles are optimized. If not\n specified, the current TI value in the Floris object will be\n used for all optimizations. Defaults to None.\n minimum_yaw_angle (float, optional): Minimum constraint on yaw\n angle (deg). Defaults to 0.0.\n maximum_yaw_angle (float, optional): Maximum constraint on yaw\n angle (deg). Defaults to 25.0.\n minimum_ws (float, optional): Minimum wind speed at which\n optimization is performed (m/s). Assumes zero power generated\n below this value. Defaults to 3.\n maximum_ws (float, optional): Maximum wind speed at which\n optimization is performed (m/s). Assumes optimal yaw offsets\n are zero above this wind speed. Defaults to 25.\n x0 (iterable, optional): The initial yaw conditions (deg). If none\n are specified, they are set to the current yaw angles for all\n turbines. Defaults to None.\n bnds (iterable, optional): Bounds for the yaw angles (tuples of\n min, max values for each turbine (deg)). If none are specified,\n they are set to (minimum_yaw_angle, maximum_yaw_angle) for each\n turbine. Defaults to None.\n opt_method (str, optional): The optimization method used by\n scipy.optimize.minize. Defaults to 'SLSQP'.\n opt_options (dictionary, optional): Optimization options used by\n scipy.optimize.minize. 
If none are specified, they are set to\n {'maxiter': 100, 'disp': False, 'iprint': 1, 'ftol': 1e-7,\n 'eps': 0.01}. Defaults to None.\n include_unc (bool, optional): Determines whether wind direction or\n yaw uncertainty are included. If True, uncertainty in wind\n direction and/or yaw position is included when determining wind\n farm power. Uncertainty is included by computing the mean wind\n farm power for a distribution of wind direction and yaw\n position deviations from the intended wind direction and yaw\n angles. Defaults to False.\n unc_pmfs (dictionary, optional): A dictionary containing\n probability mass functions describing the distribution of wind\n direction and yaw position deviations when wind direction and\n or yaw position uncertainty is included in the power\n calculations. Contains the following key-value pairs:\n\n - **wd_unc** (*np.array*): The wind direction\n deviations from the intended wind direction (deg).\n - **wd_unc_pmf** (*np.array*): The probability\n of each wind direction deviation in **wd_unc** occuring.\n - **yaw_unc** (*np.array*): The yaw angle deviations\n from the intended yaw angles (deg).\n - **yaw_unc_pmf** (*np.array*): The probability\n of each yaw angle deviation in **yaw_unc** occuring.\n\n If none are specified, default PMFs are calculated using\n values provided in **unc_options**. Defaults to None.\n unc_options (dictionary, optional): A dictionary containing values\n used to create normally-distributed, zero-mean probability mass\n functions describing the distribution of wind direction and yaw\n position deviations when wind direction and/or yaw position\n uncertainty is included. This argument is only used when\n **unc_pmfs** is None and contains the following key-value pairs:\n\n - **std_wd** (*float*): The standard deviation of\n the wind direction deviations from the original wind\n direction (deg).\n - **std_yaw** (*float*): The standard deviation of\n the yaw angle deviations from the original yaw angles (deg).\n - **pmf_res** (*float*): The resolution in degrees\n of the wind direction and yaw angle PMFs.\n - **pdf_cutoff** (*float*): The cumulative\n distribution function value at which the tails of the\n PMFs are truncated.\n\n If none are specified, default values of\n {'std_wd': 4.95, 'std_yaw': 1.75, 'pmf_res': 1.0,\n 'pdf_cutoff': 0.995} are used. Defaults to None.\n \"\"\"\n super().__init__(\n fi,\n wd,\n ws,\n ti=ti,\n minimum_yaw_angle=minimum_yaw_angle,\n maximum_yaw_angle=maximum_yaw_angle,\n minimum_ws=minimum_ws,\n maximum_ws=maximum_ws,\n x0=x0,\n bnds=bnds,\n opt_method=opt_method,\n opt_options=opt_options,\n include_unc=include_unc,\n unc_pmfs=unc_pmfs,\n unc_options=unc_options,\n calc_init_power=False\n )\n\n # Private methods\n\n def _calc_baseline_power_one_case(self, ws, wd, ti=None):\n \"\"\"\n For a single (wind speed, direction, ti (optional)) combination, finds\n the baseline power produced by the wind farm and the ideal power\n without wake losses.\n\n Args:\n ws (float): The wind speed used in floris for the yaw optimization.\n wd (float): The wind direction used in floris for the yaw\n optimization.\n ti (float, optional): An optional turbulence intensity value for\n the yaw optimization. 
Defaults to None, meaning TI will not be\n included in the AEP calculations.\n\n Returns:\n - **df_base** (*Pandas DataFrame*) - DataFrame with a single row,\n containing the following columns:\n\n - **ws** (*float*) - The wind speed value for the row.\n - **wd** (*float*) - The wind direction value for the row.\n - **ti** (*float*) - The turbulence intensity value for the\n row. Only included if self.ti is not None.\n - **power_baseline** (*float*) - The total power produced by\n the wind farm with baseline yaw control (W).\n - **power_no_wake** (*float*) - The ideal total power produced\n by the wind farm without wake losses (W).\n - **turbine_power_baseline** (*list* (*float*)) - A\n list containing the baseline power without wake steering\n for each wind turbine (W).\n - **turbine_power_no_wake** (*list* (*float*)) - A list\n containing the ideal power without wake losses for each\n wind turbine (W).\n \"\"\"\n if ti is None:\n print(\n \"Computing wind speed = \"\n + str(ws)\n + \" m/s, wind direction = \"\n + str(wd)\n + \" deg.\"\n )\n else:\n print(\n \"Computing wind speed = \"\n + str(ws)\n + \" m/s, wind direction = \"\n + str(wd)\n + \" deg, turbulence intensity = \"\n + str(ti)\n + \".\"\n )\n\n # Find baseline power in FLORIS\n\n if ws >= self.minimum_ws:\n if ti is None:\n self.fi.reinitialize_flow_field(wind_direction=wd, wind_speed=ws)\n else:\n self.fi.reinitialize_flow_field(\n wind_direction=wd, wind_speed=ws, turbulence_intensity=ti\n )\n # calculate baseline power\n self.fi.calculate_wake(yaw_angles=0.0)\n power_base = self.fi.get_turbine_power(\n include_unc=self.include_unc,\n unc_pmfs=self.unc_pmfs,\n unc_options=self.unc_options,\n )\n\n # calculate power for no wake case\n self.fi.calculate_wake(no_wake=True)\n power_no_wake = self.fi.get_turbine_power(\n include_unc=self.include_unc,\n unc_pmfs=self.unc_pmfs,\n unc_options=self.unc_options,\n no_wake=True,\n )\n else:\n power_base = self.nturbs * [0.0]\n power_no_wake = self.nturbs * [0.0]\n\n # add variables to dataframe\n if ti is None:\n df_base = pd.DataFrame(\n {\n \"ws\": [ws],\n \"wd\": [wd],\n \"power_baseline\": [np.sum(power_base)],\n \"turbine_power_baseline\": [power_base],\n \"power_no_wake\": [np.sum(power_no_wake)],\n \"turbine_power_no_wake\": [power_no_wake],\n }\n )\n else:\n df_base = pd.DataFrame(\n {\n \"ws\": [ws],\n \"wd\": [wd],\n \"ti\": [ti],\n \"power_baseline\": [np.sum(power_base)],\n \"turbine_power_baseline\": [power_base],\n \"power_no_wake\": [np.sum(power_no_wake)],\n \"turbine_power_no_wake\": [power_no_wake],\n }\n )\n\n return df_base\n\n def _optimize_one_case(self, ws, wd, initial_farm_power, ti=None):\n \"\"\"\n For a single (wind speed, direction, ti (optional)) combination, finds\n the power resulting from optimal wake steering.\n\n Args:\n ws (float): The wind speed used in floris for the yaw optimization.\n wd (float): The wind direction used in floris for the yaw\n optimization.\n ti (float, optional): An optional turbulence intensity value for\n the yaw optimization. Defaults to None, meaning TI will not be\n included in the AEP calculations.\n\n Returns:\n - **df_opt** (*Pandas DataFrame*) - DataFrame with a single row,\n containing the following columns:\n\n - **ws** (*float*) - The wind speed value for the row.\n - **wd** (*float*) - The wind direction value for the row.\n - **ti** (*float*) - The turbulence intensity value for the\n row. 
Only included if self.ti is not None.\n - **power_opt** (*float*) - The total power produced by the\n wind farm with optimal yaw offsets (W).\n - **turbine_power_opt** (*list* (*float*)) - A list\n containing the power produced by each wind turbine with\n optimal yaw offsets (W).\n - **yaw_angles** (*list* (*float*)) - A list containing\n the optimal yaw offsets for maximizing total wind farm\n power for each wind turbine (deg).\n \"\"\"\n if ti is None:\n print(\n \"Computing wind speed = \"\n + str(ws)\n + \" m/s, wind direction = \"\n + str(wd)\n + \" deg.\"\n )\n else:\n print(\n \"Computing wind speed = \"\n + str(ws)\n + \" m/s, wind direction = \"\n + str(wd)\n + \" deg, turbulence intensity = \"\n + str(ti)\n + \".\"\n )\n\n # Optimizing wake redirection control\n\n if (ws >= self.minimum_ws) & (ws <= self.maximum_ws):\n if ti is None:\n self.fi.reinitialize_flow_field(wind_direction=wd, wind_speed=ws)\n else:\n self.fi.reinitialize_flow_field(\n wind_direction=wd, wind_speed=ws, turbulence_intensity=ti\n )\n\n self.initial_farm_power = initial_farm_power\n opt_yaw_angles = self._optimize()\n\n if np.sum(opt_yaw_angles) == 0:\n print(\n \"No change in controls suggested for this inflow \\\n condition...\"\n )\n\n # optimized power\n self.fi.calculate_wake(yaw_angles=opt_yaw_angles)\n power_opt = self.fi.get_turbine_power(\n include_unc=self.include_unc,\n unc_pmfs=self.unc_pmfs,\n unc_options=self.unc_options,\n )\n elif ws >= self.minimum_ws:\n print(\n \"No change in controls suggested for this inflow \\\n condition...\"\n )\n if ti is None:\n self.fi.reinitialize_flow_field(wind_direction=wd, wind_speed=ws)\n else:\n self.fi.reinitialize_flow_field(\n wind_direction=wd, wind_speed=ws, turbulence_intensity=ti\n )\n self.fi.calculate_wake(yaw_angles=0.0)\n opt_yaw_angles = self.nturbs * [0.0]\n power_opt = self.fi.get_turbine_power(\n include_unc=self.include_unc,\n unc_pmfs=self.unc_pmfs,\n unc_options=self.unc_options,\n )\n else:\n print(\n \"No change in controls suggested for this inflow \\\n condition...\"\n )\n opt_yaw_angles = self.nturbs * [0.0]\n power_opt = self.nturbs * [0.0]\n\n # add variables to dataframe\n if ti is None:\n df_opt = pd.DataFrame(\n {\n \"ws\": [ws],\n \"wd\": [wd],\n \"power_opt\": [np.sum(power_opt)],\n \"turbine_power_opt\": [power_opt],\n \"yaw_angles\": [opt_yaw_angles],\n }\n )\n else:\n df_opt = pd.DataFrame(\n {\n \"ws\": [ws],\n \"wd\": [wd],\n \"ti\": [ti],\n \"power_opt\": [np.sum(power_opt)],\n \"turbine_power_opt\": [power_opt],\n \"yaw_angles\": [opt_yaw_angles],\n }\n )\n\n return df_opt\n\n # Public methods\n\n def calc_baseline_power(self):\n \"\"\"\n This method computes the baseline power produced by the wind farm and\n the ideal power without wake losses for a series of wind speed, wind\n direction, and optionally TI combinations. The optimization for\n different wind condition combinations is parallelized using the mpi4py\n futures module.\n\n Returns:\n pandas.DataFrame: A pandas DataFrame with the same number of rows\n as the length of the wd and ws arrays, containing the following\n columns:\n\n - **ws** (*float*) - The wind speed values for which power is\n computed (m/s).\n - **wd** (*float*) - The wind direction value for which power\n is calculated (deg).\n - **ti** (*float*) - The turbulence intensity value for which\n power is calculated. 
Only included if self.ti is not None.\n - **power_baseline** (*float*) - The total power produced by\n he wind farm with baseline yaw control (W).\n - **power_no_wake** (*float*) - The ideal total power produced\n by the wind farm without wake losses (W).\n - **turbine_power_baseline** (*list* (*float*)) - A list\n containing the baseline power without wake steering for each\n wind turbine in the wind farm (W).\n - **turbine_power_no_wake** (*list* (*float*)) - A list\n containing the ideal power without wake losses for each wind\n turbine in the wind farm (W).\n \"\"\"\n try:\n from mpi4py.futures import MPIPoolExecutor\n except ImportError:\n err_msg = (\n \"It appears you do not have mpi4py installed. \"\n + \"Please refer to https://mpi4py.readthedocs.io/ for \"\n + \"guidance on how to properly install the module.\"\n )\n self.logger.error(err_msg, stack_info=True)\n raise ImportError(err_msg)\n\n print(\"=====================================================\")\n print(\"Calculating baseline power in parallel...\")\n print(\"Number of wind conditions to calculate = \", len(self.wd))\n print(\"=====================================================\")\n\n df_base = pd.DataFrame()\n\n with MPIPoolExecutor() as executor:\n if self.ti is None:\n for df_base_one in executor.map(\n self._calc_baseline_power_one_case, self.ws.values, self.wd.values\n ):\n\n # add variables to dataframe\n df_base = df_base.append(df_base_one)\n else:\n for df_base_one in executor.map(\n self._calc_baseline_power_one_case,\n self.ws.values,\n self.wd.values,\n self.ti.values,\n ):\n\n # add variables to dataframe\n df_base = df_base.append(df_base_one)\n\n df_base.reset_index(drop=True, inplace=True)\n\n self.df_base = df_base\n return df_base\n\n def optimize(self):\n \"\"\"\n This method solves for the optimum turbine yaw angles for power\n production and the resulting power produced by the wind farm for a\n series of wind speed, wind direction, and optionally TI combinations.\n The optimization for different wind condition combinations is\n parallelized using the mpi4py.futures module.\n\n Returns:\n pandas.DataFrame: A pandas DataFrame with the same number of rows\n as the length of the wd and ws arrays, containing the following\n columns:\n\n - **ws** (*float*) - The wind speed values for which the yaw\n angles are optimized and power is computed (m/s).\n - **wd** (*float*) - The wind direction values for which the\n yaw angles are optimized and power is computed (deg).\n - **ti** (*float*) - The turbulence intensity values for which\n the yaw angles are optimized and power is computed. Only\n included if self.ti is not None.\n - **power_opt** (*float*) - The total power produced by the\n wind farm with optimal yaw offsets (W).\n - **turbine_power_opt** (*list* (*float*)) - A list containing\n the power produced by each wind turbine with optimal yaw\n offsets (W).\n - **yaw_angles** (*list* (*float*)) - A list containing the\n optimal yaw offsets for maximizing total wind farm power for\n each wind turbine (deg).\n \"\"\"\n try:\n from mpi4py.futures import MPIPoolExecutor\n except ImportError:\n err_msg = (\n \"It appears you do not have mpi4py installed. 
\"\n + \"Please refer to https://mpi4py.readthedocs.io/ for \"\n + \"guidance on how to properly install the module.\"\n )\n self.logger.error(err_msg, stack_info=True)\n raise ImportError(err_msg)\n\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control in parallel...\")\n print(\"Number of wind conditions to optimize = \", len(self.wd))\n print(\"Number of yaw angles to optimize = \", len(self.x0))\n print(\"=====================================================\")\n\n df_opt = pd.DataFrame()\n\n with MPIPoolExecutor() as executor:\n if self.ti is None:\n for df_opt_one in executor.map(\n self._optimize_one_case,\n self.ws.values,\n self.wd.values,\n self.df_base.power_baseline.values,\n ):\n\n # add variables to dataframe\n df_opt = df_opt.append(df_opt_one)\n else:\n for df_opt_one in executor.map(\n self._optimize_one_case,\n self.ws.values,\n self.wd.values,\n self.df_base.power_baseline.values,\n self.ti.values,\n ):\n\n # add variables to dataframe\n df_opt = df_opt.append(df_opt_one)\n\n df_opt.reset_index(drop=True, inplace=True)\n\n return df_opt\n"
] | [
[
"numpy.sum",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
goodwanghan/ibis | [
"02cbab7f3a3a62b3e0a9107549891823d89a4de9"
] | [
"ibis/backends/pandas/tests/execution/test_timecontext.py"
] | [
"import pandas as pd\nimport pandas.testing as tm\nimport pytest\n\nimport ibis\nimport ibis.common.exceptions as com\nimport ibis.expr.operations as ops\nfrom ibis.backends.pandas.execution import execute\nfrom ibis.backends.pandas.execution.window import trim_window_result\nfrom ibis.expr.scope import Scope\nfrom ibis.expr.timecontext import (\n TimeContextRelation,\n adjust_context,\n compare_timecontext,\n construct_time_context_aware_series,\n)\nfrom ibis.expr.types import TimeContext\n\n\nclass CustomAsOfJoin(ops.AsOfJoin):\n pass\n\n\ndef test_execute_with_timecontext(time_table):\n expr = time_table\n # define a time context for time-series data\n context = (pd.Timestamp('20170101'), pd.Timestamp('20170103'))\n\n # without time context, execute produces every row\n df_all = expr.execute()\n assert len(df_all['time']) == 8\n\n # with context set, execute produces only rows within context\n df_within_context = expr.execute(timecontext=context)\n assert len(df_within_context['time']) == 1\n\n\ndef test_bad_timecontext(time_table, t):\n expr = time_table\n\n # define context with illegal string\n with pytest.raises(com.IbisError, match=r\".*type pd.Timestamp.*\"):\n context = ('bad', 'context')\n expr.execute(timecontext=context)\n\n # define context with unsupport type int\n with pytest.raises(com.IbisError, match=r\".*type pd.Timestamp.*\"):\n context = (20091010, 20100101)\n expr.execute(timecontext=context)\n\n # define context with too few values\n with pytest.raises(com.IbisError, match=r\".*should specify.*\"):\n context = pd.Timestamp('20101010')\n expr.execute(timecontext=context)\n\n # define context with begin value later than end\n with pytest.raises(com.IbisError, match=r\".*before or equal.*\"):\n context = (pd.Timestamp('20101010'), pd.Timestamp('20090101'))\n expr.execute(timecontext=context)\n\n # execute context with a table without TIME_COL\n with pytest.raises(com.IbisError, match=r\".*must have a time column.*\"):\n context = (pd.Timestamp('20090101'), pd.Timestamp('20100101'))\n t.execute(timecontext=context)\n\n\ndef test_bad_call_to_adjust_context():\n op = \"not_a_node\"\n context = (pd.Timestamp('20170101'), pd.Timestamp('20170103'))\n scope = Scope()\n with pytest.raises(\n com.IbisError, match=r\".*Unsupported input type for adjust context.*\"\n ):\n adjust_context(op, scope, context)\n\n\ndef test_compare_timecontext():\n c1 = (pd.Timestamp('20170101'), pd.Timestamp('20170103'))\n c2 = (pd.Timestamp('20170101'), pd.Timestamp('20170111'))\n c3 = (pd.Timestamp('20160101'), pd.Timestamp('20160103'))\n c4 = (pd.Timestamp('20161215'), pd.Timestamp('20170102'))\n assert compare_timecontext(c1, c2) == TimeContextRelation.SUBSET\n assert compare_timecontext(c2, c1) == TimeContextRelation.SUPERSET\n assert compare_timecontext(c1, c4) == TimeContextRelation.OVERLAP\n assert compare_timecontext(c1, c3) == TimeContextRelation.NONOVERLAP\n\n\ndef test_context_adjustment_asof_join(\n time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2\n):\n expr = time_keyed_left.asof_join(\n time_keyed_right, 'time', by='key', tolerance=4 * ibis.interval(days=1)\n )[time_keyed_left, time_keyed_right.other_value]\n context = (pd.Timestamp('20170105'), pd.Timestamp('20170111'))\n result = expr.execute(timecontext=context)\n\n # compare with asof_join of manually trimmed tables\n trimmed_df1 = time_keyed_df1[time_keyed_df1['time'] >= context[0]][\n time_keyed_df1['time'] < context[1]\n ]\n trimmed_df2 = time_keyed_df2[\n time_keyed_df2['time'] >= context[0] - 
pd.Timedelta(days=4)\n ][time_keyed_df2['time'] < context[1]]\n expected = pd.merge_asof(\n trimmed_df1,\n trimmed_df2,\n on='time',\n by='key',\n tolerance=pd.Timedelta('4D'),\n )\n tm.assert_frame_equal(result, expected)\n\n\[email protected](\n ['interval_ibis', 'interval_pd'],\n [\n (ibis.interval(days=1), '1d'),\n (3 * ibis.interval(days=1), '3d'),\n (5 * ibis.interval(days=1), '5d'),\n ],\n)\ndef test_context_adjustment_window(\n time_table, time_df3, interval_ibis, interval_pd\n):\n # trim data manually\n expected = (\n time_df3.set_index('time')\n .value.rolling(interval_pd, closed='both')\n .mean()\n )\n expected = expected[\n expected.index >= pd.Timestamp('20170105')\n ].reset_index(drop=True)\n\n context = pd.Timestamp('20170105'), pd.Timestamp('20170111')\n\n window = ibis.trailing_window(interval_ibis, order_by=time_table.time)\n expr = time_table['value'].mean().over(window)\n # result should adjust time context accordingly\n result = expr.execute(timecontext=context)\n tm.assert_series_equal(result, expected)\n\n\ndef test_trim_window_result(time_df3):\n \"\"\"Unit test `trim_window_result` in Window execution\"\"\"\n df = time_df3.copy()\n context = pd.Timestamp('20170105'), pd.Timestamp('20170111')\n\n # trim_window_result takes a MultiIndex Series as input\n series = df['value']\n time_index = df.set_index('time').index\n series.index = pd.MultiIndex.from_arrays(\n [series.index, time_index],\n names=series.index.names + ['time'],\n )\n result = trim_window_result(series, context)\n expected = df['time'][df['time'] >= pd.Timestamp('20170105')].reset_index(\n drop=True\n )\n\n # result should adjust time context accordingly\n tm.assert_series_equal(result.reset_index()['time'], expected)\n\n # trim with a non-datetime type of 'time' throws Exception\n wrong_series = df['id']\n df['time'] = df['time'].astype(str)\n time_index = df.set_index('time').index\n wrong_series.index = pd.MultiIndex.from_arrays(\n [wrong_series.index, time_index],\n names=wrong_series.index.names + ['time'],\n )\n with pytest.raises(\n TypeError, match=r\".*not supported between instances.*\"\n ):\n trim_window_result(wrong_series, context)\n\n # column is ignored and series is not trimmed\n no_context_result = trim_window_result(series, None)\n tm.assert_series_equal(no_context_result, series)\n\n\ndef test_setting_timecontext_in_scope(time_table, time_df3):\n expected_win_1 = (\n time_df3.set_index('time').value.rolling('3d', closed='both').mean()\n )\n expected_win_1 = expected_win_1[\n expected_win_1.index >= pd.Timestamp('20170105')\n ].reset_index(drop=True)\n\n context = pd.Timestamp('20170105'), pd.Timestamp('20170111')\n window1 = ibis.trailing_window(\n 3 * ibis.interval(days=1), order_by=time_table.time\n )\n \"\"\"\n In the following expression, Selection node will be executed first and\n get table in context ('20170105', '20170101'). Then in window execution\n table will be executed again with a larger context adjusted by window\n preceeding days ('20170102', '20170111'). 
To get the correct result,\n the cached table result with a smaller context must be discard and updated\n to a larger time range.\n \"\"\"\n expr = time_table.mutate(value=time_table['value'].mean().over(window1))\n result = expr.execute(timecontext=context)\n tm.assert_series_equal(result[\"value\"], expected_win_1)\n\n\ndef test_context_adjustment_multi_window(time_table, time_df3):\n expected_win_1 = (\n time_df3.set_index('time')\n .rename(columns={'value': 'v1'})['v1']\n .rolling('3d', closed='both')\n .mean()\n )\n expected_win_1 = expected_win_1[\n expected_win_1.index >= pd.Timestamp('20170105')\n ].reset_index(drop=True)\n\n expected_win_2 = (\n time_df3.set_index('time')\n .rename(columns={'value': 'v2'})['v2']\n .rolling('2d', closed='both')\n .mean()\n )\n expected_win_2 = expected_win_2[\n expected_win_2.index >= pd.Timestamp('20170105')\n ].reset_index(drop=True)\n\n context = pd.Timestamp('20170105'), pd.Timestamp('20170111')\n window1 = ibis.trailing_window(\n 3 * ibis.interval(days=1), order_by=time_table.time\n )\n window2 = ibis.trailing_window(\n 2 * ibis.interval(days=1), order_by=time_table.time\n )\n expr = time_table.mutate(\n v1=time_table['value'].mean().over(window1),\n v2=time_table['value'].mean().over(window2),\n )\n result = expr.execute(timecontext=context)\n\n tm.assert_series_equal(result[\"v1\"], expected_win_1)\n tm.assert_series_equal(result[\"v2\"], expected_win_2)\n\n\ndef test_context_adjustment_window_groupby_id(time_table, time_df3):\n \"\"\"This test case is meant to test trim_window_result method\n in pandas/execution/window.py to see if it could trim Series\n correctly with groupby params\n \"\"\"\n expected = (\n time_df3.set_index('time')\n .groupby('id')\n .value.rolling('3d', closed='both')\n .mean()\n )\n # This is a MultiIndexed Series\n expected = expected.reset_index()\n expected = expected[expected.time >= pd.Timestamp('20170105')].reset_index(\n drop=True\n )['value']\n\n context = pd.Timestamp('20170105'), pd.Timestamp('20170111')\n\n # expected.index.name = None\n window = ibis.trailing_window(\n 3 * ibis.interval(days=1), group_by='id', order_by=time_table.time\n )\n expr = time_table['value'].mean().over(window)\n # result should adjust time context accordingly\n result = expr.execute(timecontext=context)\n tm.assert_series_equal(result, expected)\n\n\ndef test_adjust_context_scope(time_keyed_left, time_keyed_right):\n \"\"\"Test that `adjust_context` has access to `scope` by default.\"\"\"\n\n @adjust_context.register(CustomAsOfJoin)\n def adjust_context_custom_asof_join(\n op: ops.AsOfJoin,\n scope: Scope,\n timecontext: TimeContext,\n ) -> TimeContext:\n \"\"\"Confirms that `scope` is passed in.\"\"\"\n assert scope is not None\n return timecontext\n\n expr = CustomAsOfJoin(\n left=time_keyed_left,\n right=time_keyed_right,\n predicates='time',\n by='key',\n tolerance=ibis.interval(days=4),\n ).to_expr()\n expr = expr[time_keyed_left, time_keyed_right.other_value]\n context = (pd.Timestamp('20170105'), pd.Timestamp('20170111'))\n expr.execute(timecontext=context)\n\n\ndef test_adjust_context_complete_shift(\n time_keyed_left,\n time_keyed_right,\n time_keyed_df1,\n time_keyed_df2,\n):\n \"\"\"Test `adjust_context` function that completely shifts the context.\n\n This results in an adjusted context that is NOT a subset of the\n original context. 
This is unlike an `adjust_context` function\n that only expands the context.\n\n See #3104\n \"\"\"\n\n # Create a contrived `adjust_context` function for\n # CustomAsOfJoin to mock this.\n\n @adjust_context.register(CustomAsOfJoin)\n def adjust_context_custom_asof_join(\n op: ops.AsOfJoin,\n scope: Scope,\n timecontext: TimeContext,\n ) -> TimeContext:\n \"\"\"Shifts both the begin and end in the same direction.\"\"\"\n\n begin, end = timecontext\n timedelta = execute(op.tolerance)\n return (begin - timedelta, end - timedelta)\n\n expr = CustomAsOfJoin(\n left=time_keyed_left,\n right=time_keyed_right,\n predicates='time',\n by='key',\n tolerance=ibis.interval(days=4),\n ).to_expr()\n expr = expr[time_keyed_left, time_keyed_right.other_value]\n context = (pd.Timestamp('20170101'), pd.Timestamp('20170111'))\n result = expr.execute(timecontext=context)\n\n # Compare with asof_join of manually trimmed tables\n # Left table: No shift for context\n # Right table: Shift both begin and end of context by 4 days\n trimmed_df1 = time_keyed_df1[time_keyed_df1['time'] >= context[0]][\n time_keyed_df1['time'] < context[1]\n ]\n trimmed_df2 = time_keyed_df2[\n time_keyed_df2['time'] >= context[0] - pd.Timedelta(days=4)\n ][time_keyed_df2['time'] < context[1] - pd.Timedelta(days=4)]\n expected = pd.merge_asof(\n trimmed_df1,\n trimmed_df2,\n on='time',\n by='key',\n tolerance=pd.Timedelta('4D'),\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_construct_time_context_aware_series(time_df3):\n \"\"\"Unit test for `construct_time_context_aware_series`\"\"\"\n # Series without 'time' index will result in a MultiIndex with 'time'\n df = time_df3\n expected = df['value']\n time_index = pd.Index(df['time'])\n expected.index = pd.MultiIndex.from_arrays(\n [expected.index, time_index],\n names=expected.index.names + ['time'],\n )\n result = construct_time_context_aware_series(df['value'], df)\n tm.assert_series_equal(result, expected)\n\n # Series with 'time' as index will not change\n time_indexed_df = time_df3.set_index('time')\n expected_time_aware = time_indexed_df['value']\n result_time_aware = construct_time_context_aware_series(\n time_indexed_df['value'], time_indexed_df\n )\n tm.assert_series_equal(result_time_aware, expected_time_aware)\n\n # Series with a MultiIndex, where 'time' is in the MultiIndex,\n # will not change\n multi_index_time_aware_series = result_time_aware\n expected_multi_index_time_aware = result_time_aware\n result_multi_index_time_aware = construct_time_context_aware_series(\n multi_index_time_aware_series, time_indexed_df\n )\n tm.assert_series_equal(\n result_multi_index_time_aware, expected_multi_index_time_aware\n )\n\n # Series with a MultiIndex, where 'time' is NOT in the MultiIndex,\n # 'time' will be added into the MultiIndex\n multi_index_series = df['id']\n expected_multi_index = df['id'].copy()\n other_index = pd.Index(df['value'])\n expected_multi_index.index = pd.MultiIndex.from_arrays(\n [expected_multi_index.index, other_index, time_index],\n names=expected_multi_index.index.names + ['value', 'time'],\n )\n multi_index_series.index = pd.MultiIndex.from_arrays(\n [multi_index_series.index, other_index],\n names=multi_index_series.index.names + ['value'],\n )\n result_multi_index = construct_time_context_aware_series(\n multi_index_series, df\n )\n tm.assert_series_equal(result_multi_index, expected_multi_index)\n"
] | [
[
"pandas.testing.assert_series_equal",
"pandas.Index",
"pandas.MultiIndex.from_arrays",
"pandas.Timedelta",
"pandas.testing.assert_frame_equal",
"pandas.Timestamp"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mengjian0502/EEE598_FinalProj | [
"6e4b8d0ec79eb3e7aa982f85ecadb7f36cdd7a84"
] | [
"train.py"
] | [
"\"\"\"\nMNIST classification challenge\n\nEEE598 Spring 2021\n\"\"\"\n\nimport os\nimport logging\nimport time\nimport argparse\nfrom utils import *\nimport torch\nimport torch.optim as optim\nimport models\nimport torch.nn as nn\nfrom collections import OrderedDict, defaultdict\nfrom functools import partial\n\nparser = argparse.ArgumentParser(description='PyTorch MNIST Example')\nparser.add_argument('--model', type=str, choices=['mlp_mnist', 'cnn_mnist', 'cnn_mnist_q', 'cnn_mnist_b'], help='model type')\nparser.add_argument('--batch_size', type=int, default=64, metavar='N', help='input batch size for training (default: 64)')\nparser.add_argument('--epochs', type=int, default=100, metavar='N', help='number of epochs to train (default: 14)')\nparser.add_argument('--lr', type=float, default=0.1, metavar='LR', help='learning rate (default: 0.1)')\nparser.add_argument('--schedule', type=int, nargs='+', default=[60, 80], help='Decrease learning rate at these epochs.')\nparser.add_argument('--gammas', type=float, nargs='+', default=[0.1, 0.1], help='LR is multiplied by gamma on schedule, number of gammas should be equal to schedule')\nparser.add_argument('--log_file', type=str, default=None, help='path to log file')\n\nparser.add_argument('--momentum', type=float, default=0.9, help='Momentum.')\nparser.add_argument('--weight_decay', default=1e-4, type=float, metavar='W', help='weight decay (default: 1e-4)')\n\nparser.add_argument(\"--depth\", required=True, type=int, nargs=\"+\")\n\nparser.add_argument('--save_path', type=str, default='./save/', help='Folder to save checkpoints and log.')\nparser.add_argument('--resume', default='', type=str, help='path of the pretrained model')\nparser.add_argument('--evaluate', action='store_true', help='evaluate the model')\n\n# activation clipping(PACT)\nparser.add_argument('--clp', dest='clp', action='store_true', help='using clipped relu in each stage')\nparser.add_argument('--a_lambda', type=float, default=0.01, help='The parameter of alpha L2 regularization')\n\n# bnn\nparser.add_argument('--binary', dest='binary', action='store_true', help='bnn training')\n\nargs = parser.parse_args()\n\nargs.use_cuda = torch.cuda.is_available()\n\ndef main(): \n if not os.path.isdir(args.save_path):\n os.makedirs(args.save_path) \n\n logger = logging.getLogger('training')\n if args.log_file is not None:\n fileHandler = logging.FileHandler(args.save_path+args.log_file)\n fileHandler.setLevel(0)\n logger.addHandler(fileHandler)\n streamHandler = logging.StreamHandler()\n streamHandler.setLevel(0)\n logger.addHandler(streamHandler)\n logger.root.setLevel(0)\n\n logger.info(args)\n\n # Prepare the dataset\n train_loader, valid_loader, test_loader = get_loader(args.batch_size, args.model)\n\n # Prepare the model\n logger.info('==> Building model..\\n')\n model_cfg = getattr(models, args.model)\n\n if 'mlp' in args.model:\n model_cfg.kwargs.update({\"depth\": args.depth, \"dropout\": True, \"drop_rate\":args.drop_rate})\n elif 'cnn' in args.model:\n model_cfg.kwargs.update({\"num_class\":10})\n else:\n raise NotImplementedError(\"The current implementations only works for CNN and MLP\")\n\n model = model_cfg.base(*model_cfg.args, **model_cfg.kwargs)\n logger.info(model)\n\n if args.use_cuda:\n model = model.cuda()\n \n if args.binary:\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)\n else:\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)\n criterion = 
nn.CrossEntropyLoss().cuda()\n\n # hook the output feature maps\n activations = defaultdict(list)\n def save_activation(name, mod, inp, out):\n\t activations[name].append(out.cpu()) \n\n # Evaluation only\n if args.evaluate:\n check_point = torch.load(args.resume)\n state_dict = check_point['state_dict']\n model.load_state_dict(state_dict)\n logger.info(f\"Successfully loaded {args.resume}, Pretrained acc = {check_point['acc']}\")\n \n test_results= test(test_loader, model, criterion)\n test_acc = test_results['acc']\n logger.info(f'Test accuracy: {test_acc}')\n exit()\n\n # Training\n epoch_time = AverageMeter()\n best_acc = 0.\n columns = ['ep', 'lr', 'tr_loss', 'tr_acc', 'val_loss', 'val_acc', 'best_acc']\n\n for epoch in range(args.epochs):\n current_lr, current_momentum = adjust_learning_rate_schedule(\n optimizer, epoch, args.gammas, args.schedule, args.lr, args.momentum)\n\n # Training phase\n train_results = train(train_loader, model, criterion, optimizer, args)\n\n # Test phase\n valid_results = test(valid_loader, model, criterion)\n is_best = valid_results['acc'] > best_acc\n\n if is_best:\n best_acc = valid_results['acc']\n\n state = {\n 'state_dict': model.state_dict(),\n 'acc': best_acc,\n 'epoch': epoch,\n 'optimizer': optimizer.state_dict(),\n }\n \n filename='checkpoint.pth.tar'\n save_checkpoint(state, is_best, args.save_path, filename=filename)\n \n values = [epoch + 1, optimizer.param_groups[0]['lr'], train_results['loss'], train_results['acc'], valid_results['loss'], valid_results['acc'], best_acc]\n\n print_table(values, columns, epoch, logger)\n\n # Test\n test_results = test(test_loader, model, criterion)\n test_acc = test_results['acc']\n logger.info(f'Test accuracy: {test_acc}')\n\n\nif __name__ == '__main__':\n main()"
] | [
[
"torch.nn.CrossEntropyLoss",
"torch.cuda.is_available",
"torch.load"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ysawa/zaif-trade-bot | [
"4e5e62d2d03fff773b018ff960e2d9a0587c9a6e"
] | [
"coincheck/price_model.py"
] | [
"from datetime import datetime\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Activation, Dropout\nfrom keras.layers import LSTM\nfrom keras.optimizers import Adam\nfrom keras.utils.data_utils import get_file\nimport numpy as np\nimport random, sys\n\nfrom sklearn import model_selection\n\nfrom base.foots import Foots\nfrom base.price_model import BasePriceModel\nimport pytz\n\n\nclass CoincheckPriceModel(BasePriceModel):\n\n def __init__(self):\n self.sentence_length = 288\n\n def fit(self, path):\n data = np.load(path)\n data_length = len(data)\n sentences_count = data_length - self.sentence_length\n print('data length:', data_length)\n print('sentences count:', sentences_count)\n X = []\n y = []\n for sentence_index in range(0, sentences_count):\n x = data[sentence_index:sentence_index + self.sentence_length]\n max_x = np.max(x)\n X.append(x / max_x)\n y_ = data[sentence_index + self.sentence_length]\n y.append(y_ / max_x)\n X = np.asarray(X, dtype=np.float64)\n y = np.asarray(y, dtype=np.float64)\n X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y)\n batch_size = 512\n self.model.fit(X_train, y_train, batch_size=batch_size)\n score = self.model.evaluate(X_test, y_test, batch_size=batch_size)\n print('score:', score)\n\n def make_data(self, path, limit=None):\n foots = Foots()\n file = open(path)\n count = 0\n while True:\n line = file.readline()\n if not line:\n break\n line = line.strip()\n csv = line.split(',')\n time = int(csv[0])\n last_price = float(csv[1])\n foots.add(time, last_price)\n if limit and count > limit:\n break\n count += 1\n file.close()\n foot_hashes = list(foots.foots.keys())\n first_foot_hash = foot_hashes[0]\n last_foot_hash = foot_hashes[-1]\n foot = None\n data = []\n for foot_hash in range(first_foot_hash, last_foot_hash + 1):\n if foot_hash in foots.foots:\n foot = foots.foots[foot_hash]\n data.append(foot.array)\n np.save(path, np.asarray(data))\n\n def make_model(self):\n model = Sequential()\n lstm = LSTM(128, input_shape=(self.sentence_length, 4), return_sequences=False)\n model.add(lstm)\n model.add(Dense(4))\n model.add(Activation(\"linear\"))\n optimizer = Adam(lr=0.001)\n model.compile(loss='mean_squared_error', optimizer=optimizer)\n self.model = model\n return self.model\n\n def predict(self, x):\n max_x = np.max(x)\n predicted = self.model.predict(x / max_x)\n return predicted * max_x\n"
] | [
[
"numpy.asarray",
"numpy.load",
"numpy.max",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Syzygianinfern0/espnet | [
"3ea59a0050e8a6a40138ac2365c258825b02f9cd"
] | [
"test/espnet2/main_funcs/test_calculate_all_attentions.py"
] | [
"from collections import defaultdict\n\nimport numpy as np\nimport pytest\nimport torch\n\nfrom espnet.nets.pytorch_backend.rnn.attentions import AttAdd\nfrom espnet.nets.pytorch_backend.transformer.attention import MultiHeadedAttention\nfrom espnet2.asr.decoder.rnn_decoder import RNNDecoder\nfrom espnet2.main_funcs.calculate_all_attentions import calculate_all_attentions\nfrom espnet2.train.abs_espnet_model import AbsESPnetModel\n\n\nclass Dummy(AbsESPnetModel):\n def __init__(self):\n super().__init__()\n self.att1 = MultiHeadedAttention(2, 10, 0.0)\n self.att2 = AttAdd(10, 20, 15)\n self.desired = defaultdict(list)\n\n def forward(self, x, x_lengths, y, y_lengths):\n a1 = self.att1(y, x, x, None)\n _, a2 = self.att2(x, x_lengths, y, None)\n self.desired[\"att1\"].append(a1)\n self.desired[\"att2\"].append(a2)\n\n def collect_feats(self, **batch: torch.Tensor):\n return {}\n\n\nclass Dummy2(AbsESPnetModel):\n def __init__(self, atype):\n super().__init__()\n self.decoder = RNNDecoder(50, 128, att_conf=dict(atype=atype))\n\n def forward(self, x, x_lengths, y, y_lengths):\n self.decoder(x, x_lengths, y, y_lengths)\n\n def collect_feats(self, **batch: torch.Tensor):\n return {}\n\n\ndef test_calculate_all_attentions_MultiHeadedAttention():\n model = Dummy()\n bs = 2\n batch = {\n \"x\": torch.randn(bs, 3, 10),\n \"x_lengths\": torch.tensor([3, 2], dtype=torch.long),\n \"y\": torch.randn(bs, 2, 10),\n \"y_lengths\": torch.tensor([4, 4], dtype=torch.long),\n }\n t = calculate_all_attentions(model, batch)\n print(t)\n for k in model.desired:\n for i in range(bs):\n np.testing.assert_array_equal(t[k][i].numpy(), model.desired[k][i].numpy())\n\n\[email protected](\n \"atype\",\n [\n \"noatt\",\n \"dot\",\n \"add\",\n \"location\",\n \"location2d\",\n \"location_recurrent\",\n \"coverage\",\n \"coverage_location\",\n \"multi_head_dot\",\n \"multi_head_add\",\n \"multi_head_loc\",\n \"multi_head_multi_res_loc\",\n ],\n)\ndef test_calculate_all_attentions(atype):\n model = Dummy2(atype)\n bs = 2\n batch = {\n \"x\": torch.randn(bs, 20, 128),\n \"x_lengths\": torch.tensor([20, 17], dtype=torch.long),\n \"y\": torch.randint(0, 50, [bs, 7]),\n \"y_lengths\": torch.tensor([7, 5], dtype=torch.long),\n }\n t = calculate_all_attentions(model, batch)\n for k, o in t.items():\n for i, att in enumerate(o):\n print(att.shape)\n if att.dim() == 2:\n att = att[None]\n for a in att:\n assert a.shape == (batch[\"y_lengths\"][i], batch[\"x_lengths\"][i])\n"
] | [
[
"torch.randn",
"torch.randint",
"torch.tensor"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wmonteiro92/xmoai-examples | [
"0286d57e15cb60693f57cdff386cbb246787442b"
] | [
"tests/sklearn_classification.py"
] | [
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 11 16:49:49 2020\n\n@author: wmonteiro92\n\"\"\"\n\nfrom xmoai.setup.configure import generate_counterfactuals_classification_proba, generate_counterfactuals_classification_simple\nfrom sklearn_data.datasets import load_sample_from_dataset\nfrom sklearn_data.models import train_ml_model\n\nimport numpy as np\nfrom view_results import view_results\n\ndef train_classification_proba(y_desired_index=1, verbose=False):\n \"\"\"Train multiple datasets (classification problem) under multiple\n scikit-learn implementations.\n\n :param y_desired_index: the index of the datasset to retrieve an instance\n to test. Default is 1.\n :type y_desired_index: Integer\n :param verbose: define the verbosity. Default is False.\n :type verbose: Boolean\n \n :return: an array containing the results found. Each row includes the \n dataset used, the algorithm used, the Pareto front, the Pareto set\n (i.e. the counterfactual variables) and the multiobjective optimization\n algorithm responsible of each counterfactual, respectively.\n :rtype: np.array\n \"\"\"\n datasets = ['breast_cancer', 'digits', 'iris', 'wine']\n algorithms = ['AdaBoostClassifier', 'ExtraTreesClassifier', \\\n 'GradientBoostingClassifier', 'RandomForestClassifier', \\\n 'DecisionTreeClassifier', 'SGDClassifier', \\\n 'LogisticRegression', 'KNeighborsClassifier']\n trained_models = []\n \n for dataset in datasets:\n if verbose:\n print(f'Retrieving the dataset {dataset}.')\n \n # Loading the database (in order to train the model) and \n # required additional metadata on the sample in order to generate\n # the contrafactuals\n X, y, X_current, y_desired, immutable_column_indexes, \\\n upper_bounds, lower_bounds, y_acceptable_range, \\\n categorical_columns, integer_columns = \\\n load_sample_from_dataset(y_desired_index, dataset)\n \n # Training a ML model\n for algorithm in algorithms:\n if verbose:\n print(f'Training using {algorithm}.')\n \n model = train_ml_model(X, y, algorithm)\n \n if verbose:\n print(f'Starting counterfactual generation.')\n \n pareto_front, pareto_set, pareto_algorithms = \\\n generate_counterfactuals_classification_proba(model, X_current, \\\n y_desired, immutable_column_indexes, y_acceptable_range, \\\n upper_bounds, lower_bounds, categorical_columns, \\\n integer_columns, n_gen=50, pop_size=100, \\\n max_changed_vars=5, verbose=verbose, seed=0)\n \n trained_models.append([dataset, algorithm, pareto_front,\n pareto_set, pareto_algorithms])\n \n return np.array(trained_models)\n\ndef train_classification_simple(y_desired_index=1, verbose=False):\n datasets = ['breast_cancer', 'digits', 'iris', 'wine']\n algorithms = ['LinearSVC', 'NuSVC', 'RidgeClassifier']\n trained_models = []\n \n for dataset in datasets:\n if verbose:\n print(f'Retrieving the dataset {dataset}.')\n \n # Loading the database (in order to train the model) and \n # required additional metadata on the sample in order to generate\n # the contrafactuals\n X, y, X_current, y_desired, immutable_column_indexes, \\\n upper_bounds, lower_bounds, y_acceptable_range, \\\n categorical_columns, integer_columns = \\\n load_sample_from_dataset(y_desired_index, dataset)\n \n # Training a ML model\n for algorithm in algorithms:\n if verbose:\n print(f'Training using {algorithm}.')\n \n model = train_ml_model(X, y, algorithm)\n \n if verbose:\n print(f'Starting counterfactual generation.')\n \n pareto_front, pareto_set, pareto_algorithms = \\\n 
generate_counterfactuals_classification_simple(model, X_current, \\\n y_desired, immutable_column_indexes, upper_bounds, \\\n lower_bounds, categorical_columns, integer_columns, \\\n n_gen=100, pop_size=100, max_changed_vars=10, \\\n verbose=verbose, seed=0)\n \n trained_models.append([dataset, algorithm, pareto_front, \n pareto_set, pareto_algorithms])\n \n return trained_models\n\nresults_proba = train_classification_proba(verbose=True)\nresults_simple = train_classification_simple(verbose=True)\n\nview_results(results_proba)\nview_results(results_simple)"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stsh224/field-robotics | [
"2729e739e540c4e348fc3181b6191a51ba7d2c40"
] | [
"robotics.py"
] | [
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Nov 30 21:05:08 2020\r\n\r\n@author: Tuck\r\n\"\"\"\r\n# dependent modules \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\n'''\r\nPosition and Frame Transformation\r\n======================================================================================\r\n'''\r\n\r\ndef trans12(x_tran,y_tran):\r\n \"\"\"Creates a homogeneous 2D translation matrix\r\n \r\n Parameters\r\n ----------\r\n x_tran : float\r\n The translation in the x-direction\r\n y_tran : float\r\n The translation in the y-direction \r\n \r\n Returns\r\n ----------\r\n res_mat: np.array\r\n A homogenous matrix for the requested translation\r\n \"\"\"\r\n\r\n mat = np.zeros((3,3))\r\n mat[0][2] = x_tran\r\n mat[1][2] = y_tran\r\n mat[0][0] = 1\r\n mat[1][1] = 1\r\n mat[2][2] = 1\r\n \r\n return mat\r\n\r\ndef trot2(angle, deg_or_rad = \"rad\"):\r\n \"\"\"Creates a 2D rotation matrix\r\n \r\n Parameters\r\n ----------\r\n angle : float\r\n The angle of rotation for the transformation\r\n deg_or_rad : str, optional, default = \"rad\"\r\n A label for the frame printed near its origin\r\n \r\n Returns\r\n ----------\r\n res_mat: np.array\r\n A homogenous matrix for the requested rotation\r\n \r\n \"\"\"\r\n \r\n if deg_or_rad == 'deg':\r\n angle = angle*np.pi/180\r\n res_mat = np.array([[np.cos(angle),-1*np.sin(angle),0],\r\n [np.sin(angle),np.cos(angle),0],\r\n [0,0,1]])\r\n return res_mat\r\n\r\ndef trplot2(T, frame_color='r', frame_Name=False, frame_length = 1, ax = False, axis = [-1, 5, -1, 5]):\r\n \"\"\"Plots a 2D reference frame\r\n \r\n At a minimum, a homogeneous transformation matrix for the frame must be\r\n provided. Other parameters are optional.\r\n \r\n Parameters\r\n ----------\r\n frame_color : str, optional\r\n The frame color using the Matplotlib color notation\r\n frame_Name : str, optional\r\n A label for the frame printed near its origin\r\n frame_length, int, optional\r\n The length of the arms of the frame\r\n ax: matplotlib.axes.Axes object, optional\r\n A new figure is created if ax is not provided. Otherwise the function\r\n will plot on the provided axis, ax.\r\n axis: list, optional\r\n A list with the min and max values for the figure in the form:\r\n [xmin, xmax, ymin, ymax]. 
Default is from -1 to 5 for both x and y.\r\n \r\n \"\"\"\r\n\r\n frame = np.array([[0,0,frame_length],\r\n [frame_length,0,0],\r\n [1,1,1]])\r\n frameR = T @ frame\r\n if ax == False:\r\n fig, ax = plt.subplots(figsize=(5,5))\r\n ax.plot()\r\n ax.set_xlim(axis[0],axis[1])\r\n ax.set_ylim(axis[2],axis[3])\r\n ax.grid(True)\r\n ax.annotate(\"\",xy=(frameR[0,0],frameR[1,0]), xytext=(frameR[0,1],frameR[1,1]),\r\n arrowprops=dict(arrowstyle=\"->\", color=frame_color,\r\n shrinkA=0,shrinkB=0))\r\n ax.annotate(\"\",xy=(frameR[0,2],frameR[1,2]), xytext=(frameR[0,1],frameR[1,1]),\r\n arrowprops=dict(arrowstyle='->',color=frame_color,\r\n shrinkA=0,shrinkB=0))\r\n if frame_Name == False:\r\n ax.annotate(r\"$x$\", xy = (frameR[0,2],frameR[1,2]), #Annotate x\r\n horizontalalignment='left', verticalalignment='bottom')\r\n ax.annotate(r\"$y$\".format(frame_Name), xy = (frameR[0,0],frameR[1,0]), #Annotate y\r\n horizontalalignment='left', verticalalignment='bottom')\r\n else:\r\n ax.annotate(\"{\"+\"{}\".format(frame_Name)+\"}\", xy = (frameR[0,1],frameR[1,1]), #Annotate the orign\r\n horizontalalignment='right', verticalalignment='top')\r\n ax.annotate(r\"$x_{{{}}}$\".format(frame_Name), xy = (frameR[0,2],frameR[1,2]), #Annotate x\r\n horizontalalignment='left', verticalalignment='bottom')\r\n ax.annotate(r\"$y_{{{}}}$\".format(frame_Name), xy = (frameR[0,0],frameR[1,0]), #Annotate y\r\n horizontalalignment='left', verticalalignment='bottom')\r\n return ax\r\n\r\ndef plot_car(x, y, yaw, truckcolor=\"k\"): # pragma: no cover\r\n \"\"\"Plots a vehicle representation at a given position\r\n \r\n Parameters\r\n ----------\r\n x : float\r\n The vehicle location in the x-direction\r\n y : float\r\n The vehicle location in the y-direction\r\n yaw : float\r\n The pose angle for the vehicle in radians\r\n truckcolor: str, optional, default = \"k\"\r\n The line format (both color and style in Matplotlib representation)\r\n for the outline of the vehicle.\r\n \r\n \"\"\" \r\n \r\n car_frame = np.array([[0,0,4.5,4.5,0],\r\n [0,2,2 ,0 ,0],\r\n [1,1,1 ,1 ,1]])\r\n \r\n BR_tire = np.array([[.5 ,.5,1.5,1.5,.5 ],\r\n [.25,.5,.5 ,.25,.25],\r\n [1 ,1 ,1 ,1 ,1 ]])\r\n \r\n BL_tire = np.array([[.5 ,.5 ,1.5 ,1.5,.5 ],\r\n [1.5,1.75,1.75,1.5,1.5],\r\n [1 ,1 ,1 ,1 ,1 ]])\r\n \r\n FR_tire = np.array([[3 ,3 ,4 ,4 ,3 ],\r\n [.25,.5,.5,.25,.25],\r\n [1 ,1 ,1 ,1 ,1 ]])\r\n \r\n FL_tire = np.array([[3 ,3 ,4 ,4 ,3 ],\r\n [1.5,1.75,1.75,1.5,1.5],\r\n [1 ,1 ,1 ,1 ,1 ]])\r\n \r\n arrow = np.array([[4,3,0],\r\n [1,1,0],\r\n [1,1,1]])\r\n \r\n car_frame_res = trot2(yaw,\"deg\")@trans12(x,y)@car_frame\r\n FL_tire_res = trot2(yaw,\"deg\")@trans12(x,y)@FL_tire\r\n FR_tire_res = trot2(yaw,\"deg\")@trans12(x,y)@FR_tire\r\n BL_tire_res = trot2(yaw,\"deg\")@trans12(x,y)@BL_tire\r\n BR_tire_res = trot2(yaw,\"deg\")@trans12(x,y)@BR_tire\r\n arrow_res = trot2(yaw,\"deg\")@trans12(x,y)@arrow\r\n\r\n fig, ax = plt.subplots(figsize=(5, 5))\r\n ax.set_xlim(-1, 5)\r\n ax.set_ylim(-1, 5)\r\n ax.grid(True)\r\n \r\n ax.plot(car_frame_res[0],car_frame_res[1],color=truckcolor)\r\n ax.plot(BL_tire_res[0],BL_tire_res[1],color=truckcolor)\r\n ax.plot(BR_tire_res[0],BR_tire_res[1],color=truckcolor) \r\n ax.plot(FL_tire_res[0],FL_tire_res[1],color=truckcolor)\r\n ax.plot(FR_tire_res[0],FR_tire_res[1],color=truckcolor)\r\n \r\n ax.annotate(\"\",xy=(arrow_res[0,0],arrow_res[1,0]), xytext=(arrow_res[0,1],arrow_res[1,1]),\r\n arrowprops=dict(arrowstyle=\"->\", color='r',\r\n shrinkA=0,shrinkB=0))\r\n \r\n return ax\r\n\r\n'''\r\nDifferential 
Steer\r\n=============================================================================================\r\n'''\r\n\r\ndef diffsteerm1(wr,wl,rw,W,dt,x,y,theta):\r\n \r\n angle = theta*np.pi/180\r\n vr = wr * rw\r\n vl = wl * rw\r\n V = (vr+vl)/2\r\n omega = (vr-vl)/W\r\n R = V/omega\r\n xICC = x-R*np.sin(angle)\r\n yICC = y+R*np.cos(angle)\r\n ICC = np.array([[xICC], [yICC]])\r\n rotM=np.array([[np.cos(omega*dt),-np.sin(omega*dt)],\r\n [np.sin(omega*dt), np.cos(omega*dt)]])\r\n newAngle = (omega*dt + angle)*(180/np.pi)\r\n newXY = rotM@(np.array([[x],[y]])-ICC)+ICC\r\n #newPose = np.vstack((newXY, np.array([newAngle])))\r\n newX = newXY[0,0]\r\n newY = newXY[1,0]\r\n \r\n return newX, newY, newAngle\r\n\r\ndef diffsteerm2(wr,wl,rw,W,dt,x,y,theta):\r\n \r\n angle = theta*np.pi/180 #converts to radians for calculation\r\n vr = wr * rw\r\n vl = wl * rw\r\n V = (vr+vl)/2\r\n omega = (vr-vl)/W\r\n newX=x+dt*V*np.cos(angle)\r\n newY=y+dt*V*np.sin(angle)\r\n newAngle = (omega*dt+angle)*(180/np.pi) #converts back to degrees to print the new pose\r\n #newPose = np.vstack([newX,newY,newAngle])\r\n \r\n return newX, newY, newAngle\r\n\r\ndef diffsteerm2ss(wr,wl,rw,W,dt,x,y,theta,prior_wr=0,prior_wl=0,ssl=0,ssr=0,delta=0):\r\n \r\n if (wr-prior_wr) >=0:\r\n sr=ssr\r\n vr=wr*rw*(1-sr)\r\n else:\r\n sr=ssr*-1\r\n vr = (wr*rw)/(1+sr)\r\n if (wl-prior_wl) >=0:\r\n sl=ssl\r\n vl=wl*rw*(1-sl)\r\n else:\r\n sl=ssl*-1\r\n vl=(wl*rw)/(1+sl)\r\n \r\n angle = theta*np.pi/180\r\n delta_rad = delta*np.pi/180\r\n Vlong = (vr+vl)/2\r\n omega = (vr-vl)/W \r\n Vlat = np.tan(delta_rad)*Vlong\r\n newX=x+dt*Vlong*np.cos(angle)-dt*Vlat*np.sin(angle)\r\n newY=y+dt*Vlong*np.sin(angle)+dt*Vlat*np.cos(angle)\r\n newAngle = (omega*dt+angle)*(180/np.pi)\r\n #newPose = np.vstack([newX,newY,newAngle])\r\n \r\n return newX,newY,newAngle\r\n\r\n'''\r\nAckermann Steer\r\n=====================================================================================\r\n'''\r\n\r\ndef bicycleMS4(u = np.array([[0],[1]]), q = np.array([[0],[0],[-1.57],[1],[0]]), \r\n dt = 0.01, DT = 0.01, L = 2.5, ss = 0, prior_v = 0, delta1 = 0, \r\n delta2 = 0, tauV = 0, tauSteer = 0, maxSteer = 90*np.pi/180):\r\n \"\"\"Calculates the future position of a vehicle using the bicycle model.\r\n \r\n It runs for one complete sampling interval (DT) of the speed and steering \r\n controllers. It processes vehicle state for floor(DT/dt) loops.\r\n \r\n Parameters\r\n ----------\r\n u : np.array\r\n [[desired steering angle],\r\n [desired vehicle speed]]\r\n This is an array of inputs to the control system of the vehicle.\r\n q : np.array\r\n [[x position],\r\n [y position],\r\n [vehicle angle],\r\n [vehicle velocity],\r\n [vehicle steering angle]]\r\n This is an array of state variables describing the current state of the vehicle.\r\n dt : float\r\n The integration time step of the kinematic model\r\n DT : float\r\n The sampling interval of the steering and speed controller. Must be longer than dt to have an effect.\r\n L : float\r\n The wheelbase of the vehicle. The distance between the front and rear axles.\r\n ss : float\r\n The slip ratio of the rear wheel\r\n prior_v : float\r\n The prior velocity of the vehicle. 
Used to determine if the vehicle is braking or driving.\r\n delta1 : float\r\n The slip angle of the front wheel\r\n delta2 : float\r\n The slip angle of the rear wheel \r\n tauV : float\r\n The time constant of the speed control subsystem\r\n tauSteer : float\r\n The time constant of the steering control subsystem\r\n maxSteer : float\r\n The maximum turning angle. It applies to left and right turning.\r\n \r\n \r\n Returns\r\n ----------\r\n q : np.array\r\n The state of the vehicle after the greater of dt or \r\n DT (actually dt*floor(DT/dt)) time passes.\r\n \"\"\"\r\n #Check our input. We need min and max steering angles.\r\n if u[0,0]>maxSteer:\r\n u[0,0] = maxSteer\r\n if u[0,0]<-1*maxSteer:\r\n u[0,0] = -1*maxSteer\r\n # BEGIN dt CONTROL LOOP HERE:\r\n # You should determine the number of interations and size your arrays before starting the loop.\r\n num_loops = int(DT/dt)+1\r\n newq = np.zeros((5, num_loops)) # Each column contains a different time step.\r\n # Column 0 of newq will be state at time 0, i.e. initial state, q.\r\n newq[0, 0] = q[0, 0]\r\n newq[1, 0] = q[1, 0]\r\n newq[2, 0] = q[2, 0]\r\n newq[3, 0] = q[3, 0]\r\n newq[4, 0] = q[4, 0]\r\n for i in range(1,num_loops):\r\n if newq[4,0]>maxSteer:\r\n newq[4,0] = maxSteer\r\n if newq[4,0]<-1*maxSteer:\r\n newq[4,0] = -1*maxSteer\r\n if tauV < dt:\r\n tauV = dt\r\n if tauSteer < dt:\r\n tauSteer = dt \r\n newq[3,i] = newq[3,i-1]*(1-dt/tauV)+(dt/tauV)*u[1,0] \r\n newq[4,i] = newq[4,i-1]*(1-dt/tauSteer)+(dt/tauSteer)*u[0,0] # Steering Control Equation\r\n # Test if we are braking or driving. If the new vecolity newq[3,0] \r\n # is greater than current velocity q[3,0], we assume that we will be accelerating/driving.\r\n if (newq[3,i]-newq[3,i-1]) >=0: #driving \r\n s = ss\r\n vlong = newq[3,i] * (1-s) #driving: wheel velocity\r\n else: #braking\r\n s = ss*-1\r\n vlong = (newq[3,i]) / (1+s) #braking: wheel velocity\r\n vlat = vlong*np.tan(delta2)\r\n newq[0,i] = newq[0,i-1]+dt*(vlong*np.cos(newq[2,i-1]) - vlat*np.sin(newq[2,i-1]))\r\n newq[1,i] = newq[1,i-1]+dt*(vlong*np.sin(newq[2,i-1]) + vlat*np.cos(newq[2,i-1]))\r\n newq[2,i] = newq[2,i-1]+(dt*(vlong*np.tan(newq[4,i-1]+delta1)/L-vlat/L))\r\n #END dt CONTROL LOOP HERE\r\n return newq\r\n\r\n'''\r\nMove to Pose & Point\r\n==========================================================================================\r\n'''\r\n\r\ndef angdiff(angle1, angle2):\r\n \"\"\"Determines the smallest difference between two angles\r\n\r\n Result will be in the range [-pi, pi)\r\n\r\n Parameters\r\n ----------\r\n angle1 : float\r\n The first angle in radians\r\n angle2 : float\r\n The second angle in radians\r\n\r\n Returns\r\n ----------\r\n a : float\r\n The smallest angle between angle1 and angle2\r\n \"\"\"\r\n a = angle1 - angle2\r\n while a < (-1 * np.pi):\r\n a = a + 2 * np.pi\r\n while a >= np.pi:\r\n a = a - 2 * np.pi\r\n return a\r\n\r\n\r\ndef moveToPose(current_x, current_y, current_angle,\r\n desired_x, desired_y, desired_angle,\r\n k_rho, k_alpha, k_beta, wheelbase):\r\n \"\"\"Implements a simple proportional controller for Move to Point\r\n For stability with Ackermann style steering (bicycle model):\r\n k_rho > 0; k_beta < 0; k_alpha - k_rho > 0\r\n\r\n Parameters\r\n ----------\r\n current_x : float\r\n The current x position of the robot\r\n current_y : float\r\n The current y position of the robot\r\n current_angle : float\r\n The current heading angle of the robot\r\n desired_x : float\r\n The desired x position of the robot\r\n desired_y : float\r\n The desired y 
position of the robot\r\n desired_angle : float\r\n The desired heading angle of the robot\r\n k_rho : float\r\n The proportional gain for direction to point (velocity)\r\n k_alpha : float\r\n The proportional gain for heading to point\r\n k_beta : float\r\n The proportional gain for heading once final point is reached\r\n wheelbase : float\r\n The distance between the front and rear axles. Units should be\r\n consistent with x & y units.\r\n\r\n Returns\r\n ----------\r\n newV : float\r\n The newly calculated velocity for the robot\r\n newSteer : float\r\n The newly calculated steering angle for the robot in radians\r\n \"\"\"\r\n rho = np.sqrt(np.float_power(desired_x - current_x, 2)\r\n + np.float_power(desired_y - current_y, 2))\r\n alpha = (np.arctan((desired_y - current_y) / (desired_x - current_x))\r\n - current_angle)\r\n beta = -1 * current_angle - alpha + desired_angle\r\n newV = k_rho * rho\r\n omega = k_alpha * alpha + k_beta*beta\r\n newSteer = np.arctan((omega * wheelbase) / newV)\r\n return newV, newSteer\r\n\r\n\r\ndef moveToPoint(current_x, current_y, current_angle,\r\n desired_x, desired_y, kv, kh):\r\n \"\"\"Implements a simple proportional controller for Move to Point\r\n\r\n Parameters\r\n ----------\r\n current_x : float\r\n The current x position of the robot\r\n current_y : float\r\n The current y position of the robot\r\n current_angle : float\r\n The current heading angle of the robot\r\n desired_x : float\r\n The desired x position of the robot\r\n desired_y : float\r\n The desired y position of the robot\r\n kv : float\r\n The proportional gain for velocity\r\n kh : float\r\n The proportional gain for heading/steering angle, kh > 0.\r\n\r\n Returns\r\n ----------\r\n newV : float\r\n The newly calculated velocity for the robot\r\n newSteer : float\r\n The newly calculated steering angle for the robot\r\n \"\"\"\r\n newV = kv * np.sqrt(np.float_power(desired_x - current_x, 2)\r\n + np.float_power(desired_y - current_y, 2))\r\n desired_angle = np.arctan((desired_y - current_y) /\r\n (desired_x - current_x))\r\n newSteer = kh * angdiff(desired_angle, current_angle)\r\n return newV, newSteer\r\n\r\ndef moveToPoseConstV(current_x, current_y, current_angle, current_V,\r\n desired_x, desired_y, desired_angle,\r\n k_alpha, k_beta, wheelbase):\r\n \"\"\"Implements a simple proportional controller for Move to Pose\r\n For stability with Ackermann style steering (bicycle model):\r\n k_beta < 0; k_alpha > 0?\r\n This version of Move to Pose only adjusts steering. Velocity is assumed\r\n constant. Calculation of the steering angle depends on Velocity, so it\r\n must be provided as an input to the function.\r\n\r\n Parameters\r\n ----------\r\n current_x : float\r\n The current x position of the robot\r\n current_y : float\r\n The current y position of the robot\r\n current_angle : float\r\n The current heading angle of the robot\r\n current_V : float\r\n The current velocity of the robot\r\n desired_x : float\r\n The desired x position of the robot\r\n desired_y : float\r\n The desired y position of the robot\r\n desired_angle : float\r\n The desired heading angle of the robot\r\n k_alpha : float\r\n The proportional gain for heading to point\r\n k_beta : float\r\n The proportional gain for heading once final point is reached\r\n wheelbase : float\r\n The distance between the front and rear axles. 
Units should be\r\n consistent with x & y units.\r\n\r\n Returns\r\n ----------\r\n newSteer : float\r\n The newly calculated steering angle for the robot in radians\r\n \"\"\"\r\n\r\n alpha = (np.arctan2((desired_y - current_y), (desired_x - current_x))\r\n - current_angle)\r\n beta = -1 * current_angle - alpha + desired_angle\r\n omega = k_alpha * alpha + k_beta*beta\r\n newSteer = np.arctan((omega * wheelbase) / current_V)\r\n return newSteer\r\n\r\n\r\ndef moveToPointConstV(current_x, current_y, current_angle,\r\n desired_x, desired_y, kh):\r\n \"\"\"Implements a simple proportional controller for Move to Point\r\n\r\n Parameters\r\n ----------\r\n current_x : float\r\n The current x position of the robot\r\n current_y : float\r\n The current y position of the robot\r\n current_angle : float\r\n The current heading angle of the robot\r\n desired_x : float\r\n The desired x position of the robot\r\n desired_y : float\r\n The desired y position of the robot\r\n kh : float\r\n The proportional gain for heading/steering angle, kh > 0.\r\n\r\n Returns\r\n ----------\r\n newSteer : float\r\n The newly calculated steering angle for the robot\r\n \"\"\"\r\n desired_angle = np.arctan2((desired_y - current_y),\r\n (desired_x - current_x))\r\n newSteer = kh * angdiff(desired_angle, current_angle)\r\n return newSteer\r\n\r\n'''\r\nPath Following: Pure Pursuit & Maneuvers\r\n==================================================================================\r\n'''\r\n\r\ndef purePursuitController(q=np.array([[0], [0], [-1.57], [1], [0]]),\r\n L=2.5, ld=10,\r\n path=np.array((np.linspace(0, 10, 11),\r\n np.linspace(0, 10, 11)))):\r\n \"\"\"Runs a pure pursuit controller to follow a path.\r\n\r\n Parameters\r\n ----------\r\n q : np.array\r\n [[x position],\r\n [y position],\r\n [vehicle angle],\r\n [vehicle velocity],\r\n [vehicle steering angle]]\r\n This is an array of state variables describing the current\r\n state of the vehicle.\r\n L : float\r\n The wheelbase of the vehicle.\r\n The distance between the front and rear axles.\r\n ld : float\r\n The look ahead distance.\r\n path : np.array\r\n [[x1, x2, x3,...]\r\n [y1, y2, y3,...]]\r\n The path that the vehicle is to follow. It is defined by a set of\r\n x,y points.\r\n\r\n Returns\r\n ----------\r\n steerAngle : np.array\r\n The angle at which the steering wheel should be set\r\n distanceMin : np.array\r\n The cross track error (XTE) of the vehicle current position. 
\r\n \"\"\"\r\n robotX = q[0, 0]\r\n robotY = q[1, 0]\r\n robotAngle = q[2, 0]\r\n min_ld_index = 0\r\n min_XTE_index = 0\r\n # Calculate the first point and save as the initial minimum distance\r\n # We are calculating a minimum to ld and a minimum to robot.\r\n pathX = path[0, 0]\r\n pathY = path[1, 0]\r\n distanceMinld = np.abs(np.sqrt(np.float_power(robotX - pathX, 2)\r\n + np.float_power(robotY - pathY, 2)) - ld)\r\n distanceMin = np.sqrt(np.float_power(robotX - pathX, 2)\r\n + np.float_power(robotY - pathY, 2))\r\n path_len = path.shape[1]\r\n # Check every point in the path to see which provides the minXTE and which\r\n # is closest to ld from the robot.\r\n for i in range(path_len):\r\n pathX = path[0, i]\r\n pathY = path[1, i]\r\n distance = np.sqrt(np.float_power(robotX - pathX, 2)\r\n + np.float_power(robotY - pathY, 2))\r\n if (distance < distanceMin):\r\n min_XTE_index = i\r\n distanceMin = distance\r\n if (np.abs(distance - ld) < distanceMinld):\r\n min_ld_index = i\r\n distanceMinld = np.abs(distance - ld)\r\n\r\n # To calculate ey, express our path in the robot frame\r\n fShiftAngle = -1 * robotAngle\r\n fShiftX = -1 * robotX\r\n fShiftY = -1 * robotY\r\n matTRbad = np.array([[np.cos(fShiftAngle), -1 * np.sin(fShiftAngle), fShiftX],\r\n [np.sin(fShiftAngle), np.cos(fShiftAngle), fShiftY],\r\n [0, 0, 1]])\r\n matT = np.array([[fShiftX],\r\n [fShiftY]])\r\n matR = np.array([[np.cos(fShiftAngle), -1 * np.sin(fShiftAngle)],\r\n [np.sin(fShiftAngle), np.cos(fShiftAngle)]])\r\n path_robot_framebad = matTRbad @ np.vstack((path, np.ones((1, path.shape[1]))))\r\n path_robot_frameT = matT + path\r\n path_robot_frameTR = matR @ path_robot_frameT\r\n ey = path_robot_frameTR[1, min_ld_index]\r\n steerAngle = np.arctan((2*ey*L)/(np.float_power(ld, 2)))\r\n return steerAngle, distanceMin\r\n\r\ndef piTurn(r_min, w_row, rows):\r\n \"\"\"Determines a path (set of points) representing a pi turn.\r\n\r\n The resulting path starts at 0,0 with a angle of 0 deg. (pose = 0,0,0). It\r\n will turn left or right depending on if rows is positive (right turn) or\r\n negative (left turn). Path should be translated and rotated to its proper\r\n position in the field by the calling function.\r\n\r\n Parameters\r\n ----------\r\n r_min : float\r\n Turning radius of the vehicle.\r\n w_row : float\r\n The width of a row in the field.\r\n rows : int\r\n The number of rows between the current row and the target row\r\n e.g. Vehicle is turning from the mid-point of row i\r\n into the mid-point of row i+N\r\n\r\n Returns\r\n ----------\r\n path : np.array\r\n [[x1, x2, x3,...]\r\n [y1, y2, y3,...]]\r\n The path that the vehicle is to follow. It is defined by a set of\r\n x,y points.\r\n distance : float\r\n The length of the path that accomplishes the requested pi-turn.\r\n \"\"\"\r\n # First check if a pi turn is possible\r\n if rows * np.abs(w_row) < 2 * r_min:\r\n path = np.zeros((0, 0)) # Turn is not possible. 
Path is empty\r\n distance = np.nan # Distance cannot be calculated\r\n return (path, distance)\r\n\r\n d = rows * w_row # distance from start path to end path\r\n if d > 0: # Turn to the right\r\n # Create the starting arc for leaving\r\n # the initial path (60 points+endpoint)\r\n a = np.linspace(-np.pi/2, 0, 61)\r\n x_start = 0 + r_min * np.cos(a)\r\n y_start = -1 * r_min - r_min * np.sin(a)\r\n # Create the final arc for entering\r\n # the target path (60 points+endpoint)\r\n a = np.linspace(0, np.pi/2, 61)\r\n x_end = 0 + r_min * np.cos(a)\r\n y_end = -1 * rows * w_row + r_min - r_min * np.sin(a)\r\n # Create straight section if necessary\r\n if rows * w_row == 2 * r_min: # no straight section. Connect arcs\r\n # The first point in x_end repeats x_start.\r\n # Same for y_end and y_start\r\n x = np.hstack((x_start, x_end[1:]))\r\n y = np.hstack((y_start, y_end[1:]))\r\n path = np.array((x, y))\r\n else:\r\n # Create straight section\r\n x_straight = np.linspace(x_start[-1], x_end[0], 61)\r\n y_straight = np.linspace(y_start[-1], y_end[0], 61)\r\n # Connect segments. Once again each segment repeats the start\r\n # and end.\r\n x = np.hstack((x_start, x_straight[1:], x_end[1:]))\r\n y = np.hstack((y_start, y_straight[1:], y_end[1:]))\r\n else:\r\n # Create the starting arc for leaving\r\n # the initial path (60 points+endpoint)\r\n a = np.linspace(np.pi/2, 0, 61)\r\n x_start = 0 + r_min * np.cos(a)\r\n y_start = r_min - r_min * np.sin(a)\r\n # Create the final arc for entering\r\n # the target path (60 points+endpoint)\r\n a = np.linspace(0, -1 * np.pi/2, 61)\r\n x_end = 0 + r_min * np.cos(a)\r\n y_end = -rows * w_row - r_min - r_min * np.sin(a)\r\n # Create straight section if necessary\r\n if rows * w_row == 2 * r_min: # no straight section. Connect arcs\r\n # The first point in x_end repeats x_start.\r\n # Same for y_end and y_start\r\n x = np.hstack((x_start, x_end[1:]))\r\n y = np.hstack((y_start, y_end[1:]))\r\n path = np.array((x, y))\r\n else:\r\n # Create straight section\r\n x_straight = np.linspace(x_start[-1], x_end[0], 61)\r\n y_straight = np.linspace(y_start[-1], y_end[0], 61)\r\n # Connect segments. Once again each segment repeats the start\r\n # and end.\r\n x = np.hstack((x_start, x_straight[1:], x_end[1:]))\r\n y = np.hstack((y_start, y_straight[1:], y_end[1:]))\r\n path = np.array((x, y))\r\n distance = rows * w_row + (np.pi - 2) * r_min\r\n return path, distance\r\n\r\n\r\ndef omegaTurn(r_min, w_row, rows):\r\n \"\"\"Determines a path (set of points) representing a omega turn.\r\n\r\n The resulting path starts at 0,0 with a angle of 0 deg. (pose = 0,0,0). It\r\n will turn left or right depending on if rows is positive (right turn) or\r\n negative (left turn). Path should be translated and rotated to its proper\r\n position in the field by the calling function.\r\n\r\n Parameters\r\n ----------\r\n r_min : float\r\n Turning radius of the vehicle.\r\n w_row : float\r\n The width of a row in the field.\r\n rows : int\r\n The number of rows between the current row and the target row\r\n e.g. Vehicle is turning from the mid-point of row i\r\n into the mid-point of row i+N\r\n\r\n Returns\r\n ----------\r\n path : np.array\r\n [[x1, x2, x3,...]\r\n [y1, y2, y3,...]]\r\n The path that the vehicle is to follow. 
It is defined by a set of\r\n x,y points.\r\n distance : float\r\n The length of the path that accomplishes the requested pi-turn.\r\n \"\"\"\r\n # First check if a omega turn is possible\r\n d = rows * w_row # distance from start path to end path\r\n if rows * w_row > 2 * r_min:\r\n path = np.zeros((0, 0)) # Turn is not possible. Path is empty\r\n distance = np.nan # Distance cannot be calculated\r\n return (path, distance)\r\n\r\n if d > 0: # Turn to the right\r\n # Create the starting arc for leaving the path (60 points+endpoint)\r\n # Arc starts at pi/2 and rotates up/back toward 0, angle will be alpha\r\n alpha = np.arccos((r_min + d / 2) / (2 * r_min))\r\n a = np.linspace(np.pi / 2, np.pi / 2 - alpha, 61)\r\n x_start = 0 + r_min * np.cos(a)\r\n y_start = r_min - r_min * np.sin(a)\r\n # Create the final arc for entering the path (60 points+endpoint)\r\n a = np.linspace(-1 * np.pi / 2 + alpha, -1 * np.pi/2, 61)\r\n x_end = 0 + r_min * np.cos(a)\r\n y_end = -1 * d - r_min - r_min * np.sin(a)\r\n # Create bulb section\r\n bulb_center_x = 2 * r_min * np.sqrt(1 -\r\n np.float_power((r_min + d / 2) /\r\n (2 * r_min), 2))\r\n bulb_center_y = -1 * d / 2\r\n a = np.linspace(-1 * np.pi/2 - alpha, np.pi / 2 + alpha, 61)\r\n x_bulb = bulb_center_x + r_min * np.cos(a)\r\n y_bulb = bulb_center_y - r_min * np.sin(a)\r\n else:\r\n # Create the starting arc for leaving the path (60 points+endpoint)\r\n d = d * -1\r\n # Arc starts at pi/2 and rotates up/back toward 0, angle will be alpha\r\n alpha = np.arccos((r_min + d / 2) / (2 * r_min))\r\n a = np.linspace(-1 * np.pi/2, -1 * np.pi / 2 + alpha, 61)\r\n x_start = 0 + r_min * np.cos(a)\r\n y_start = -1 * r_min - r_min * np.sin(a)\r\n # Create the final arc for entering the path (60 points+endpoint)\r\n a = np.linspace(np.pi / 2 - alpha, np.pi / 2, 61)\r\n x_end = 0 + r_min * np.cos(a)\r\n y_end = d + r_min - r_min * np.sin(a)\r\n # Create bulb section\r\n bulb_center_x = 2 * r_min * np.sqrt(1 -\r\n np.float_power((r_min + d / 2) /\r\n (2 * r_min), 2))\r\n bulb_center_y = d / 2\r\n a = np.linspace(np.pi / 2 + alpha, -1 * np.pi/2 - alpha, 61)\r\n x_bulb = bulb_center_x + r_min * np.cos(a)\r\n y_bulb = bulb_center_y - r_min * np.sin(a)\r\n # Connect segments. Each segment repeats the start and end.\r\n x = np.hstack((x_start, x_bulb[1:], x_end[1:]))\r\n y = np.hstack((y_start, y_bulb[1:], y_end[1:]))\r\n path = np.array((x, y))\r\n distance = (4 * alpha + np.pi) * r_min\r\n return path, distance\r\n\r\n\r\ndef dist(x1, y1, x2, y2):\r\n d = np.sqrt(np.float_power(x1 - x2, 2)\r\n + np.float_power(y1 - y2, 2))\r\n return d\r\n\r\n'''\r\nArea Coverage\r\n================================================================================\r\n'''\r\n\r\ndef pathsToTSP(paths):\r\n \"\"\"Converts paths into TSP nodes and creates a distance matrix\r\n\r\n This function takes the paths created in the fieldToPaths function\r\n and creates nodes that describe the endpoints of the paths.\r\n\r\n Parameters\r\n ----------\r\n paths : np.array\r\n [[[line0_x0, line0_x1],\r\n [line0_y0, line0_y1]],\r\n [[line1_x0, line1_x1],\r\n [line1_y0, line1_y1]],\r\n ... ]\r\n The paths that the vehicle is to follow. It is defined by a sets of\r\n x,y points representing the endpoints of the paths. 
It is a three\r\n dimensional array with the first dimension representing the path\r\n and then a 2D array of points below that.\r\n\r\n Returns\r\n ----------\r\n tsp_nodes: np.array\r\n [[x0, x1, x2, x3,...],\r\n [y0, y1, y2, y3,...]]\r\n\r\n The points that represent the nodes for the TSP problem.\r\n The nodes are derived from endpoints in the paths array.\r\n distance: np.array\r\n [[d00, d10, d20, d30,...],\r\n [d01, d11, d21, d31,...]\r\n ...]\r\n A distance matrix of the distances (d_xy) between node x and node y.\r\n \"\"\"\r\n\r\n tsp_nodes = np.array([np.hstack(paths[:, 0, :]),\r\n np.hstack(paths[:, 1, :])])\r\n\r\n distances = np.zeros((np.shape(tsp_nodes)[1], np.shape(tsp_nodes)[1]))\r\n for node1 in range(0, np.shape(tsp_nodes)[1]):\r\n for node2 in range(node1, np.shape(tsp_nodes)[1]):\r\n truth_arr = np.zeros(np.shape(paths)[0]) #<----- beginning of modifications\r\n for i in range(0,np.shape(paths)[0]):\r\n if (tsp_nodes[0][node1] == paths[i,0,0] and tsp_nodes[1][node1] == paths[i,1,0]) or (\r\n tsp_nodes[0][node1] == paths[i,0,1] and tsp_nodes[1][node1] == paths[i,1,1]):\r\n node1path = paths[i]\r\n else: \r\n node1path = 0 # arbitrary value different from node2path\r\n if (tsp_nodes[0][node2] == paths[i,0,0] and tsp_nodes[1][node2] == paths[i,1,0]) or (\r\n tsp_nodes[0][node2] == paths[i,0,1] and tsp_nodes[1][node2] == paths[i,1,1]):\r\n node2path = paths[i]\r\n else: \r\n node2path = 1 # arbitrary value different from node1path\r\n truth_arr[i] = np.array_equal(node1path,node2path)\r\n \r\n if np.any(truth_arr) == True:\r\n dist = 0\r\n else: \r\n dist = np.sqrt((tsp_nodes[0][node1] -\r\n tsp_nodes[0][node2]) ** 2 +\r\n (tsp_nodes[1][node1] -\r\n tsp_nodes[1][node2]) ** 2) #<----- end of modifications\r\n distances[node1, node2] = dist\r\n distances[node2, node1] = dist\r\n return tsp_nodes, distances\r\n\r\n\r\ndef tspToSolution1(nodes, cost_mat):\r\n \"\"\"Creates a solution to the TSP problem.\r\n Converts nodes to Cities and uses function from TSP notebook\r\n Cities has now been redefined as its own type of class instead of a\r\n complex point. 
It includes a varible to record its node number so that it\r\n can be referenced later.\r\n\r\n Parameters\r\n ----------\r\n nodes : np.array\r\n [[x0, x1, x2...]\r\n [y0, y1, y2...]]\r\n\r\n\r\n Returns\r\n ----------\r\n tour : list of Cities\r\n \"\"\"\r\n # define neccessary functions from TSP notebook\r\n def cost(A, B):\r\n return cost_mat[A.num, B.num]\r\n\r\n def shortest_edges_first(cities):\r\n # Return all edges between distinct cities, sorted shortest first.\"\r\n edges = [(A, B) for A in cities for B in cities\r\n if id(A) < id(B)]\r\n return sorted(edges, key=lambda edge: cost(*edge))\r\n\r\n def join_endpoints(endpoints, A, B):\r\n # Join B's segment onto the end of A's and return the segment.\r\n # Maintain endpoints dict.\"\r\n Asegment, Bsegment = endpoints[A], endpoints[B]\r\n if Asegment[-1] is not A:\r\n Asegment.reverse()\r\n if Bsegment[0] is not B:\r\n Bsegment.reverse()\r\n Asegment.extend(Bsegment)\r\n del endpoints[A], endpoints[B] # A and B are no longer endpoints\r\n endpoints[Asegment[0]] = endpoints[Asegment[-1]] = Asegment\r\n return Asegment\r\n\r\n def greedy_tsp(cities):\r\n \"\"\"Go through edges, shortest first.\r\n Use edge to join segments if possible.\"\"\"\r\n endpoints = {c: [c] for c in cities}\r\n for (A, B) in shortest_edges_first(cities):\r\n if (A in endpoints and B in endpoints and\r\n endpoints[A] != endpoints[B]):\r\n new_segment = join_endpoints(endpoints, A, B)\r\n if len(new_segment) == len(cities):\r\n return new_segment\r\n\r\n # start of additional code\r\n\r\n # converting nodes into a list of cities\r\n class Node():\r\n def __init__(self, x, y, num):\r\n self.x = x\r\n self.y = y\r\n self.num = num\r\n\r\n City = Node\r\n cities = [City(nodes[0, i], nodes[1, i], i) for i in range(nodes.shape[1])]\r\n\r\n # apply greedy algorithm\r\n tour = greedy_tsp(cities)\r\n\r\n return tour\r\n\r\n\r\ndef routeToField(tour, start_point):\r\n \"\"\"Converts a TSP route of nodes into waypoints to follow in a field.\r\n Returns a list of the points to go to.\r\n\r\n Parameters\r\n ----------\r\n tour : list of Cities\r\n [Node3 (3, 7), Node6 (7, 5), ...]\r\n\r\n start_point: [x0, y0]\r\n\r\n\r\n Returns\r\n ----------\r\n waypoint_list : np.array\r\n [[x0, x1, x2...]\r\n [y0, y1, y2...]]\r\n \"\"\"\r\n\r\n for count, node in enumerate(tour):\r\n if(node.x == start_point[0] and node.y == start_point[1]):\r\n start = node\r\n start_pos = count\r\n waypoint = [[], []]\r\n for count, node in enumerate(tour):\r\n xi = int(tour[(start_pos + count) % len(tour)].x)\r\n yi = int(tour[(start_pos + count) % len(tour)].y)\r\n point = [[xi], [yi]]\r\n waypoint = np.hstack((waypoint, point))\r\n waypoint = np.hstack((waypoint, [[start.x], [start.y]]))\r\n return waypoint\r\n"
] | [
[
"numpy.hstack",
"numpy.abs",
"numpy.arctan",
"numpy.linspace",
"numpy.float_power",
"numpy.array_equal",
"numpy.sqrt",
"matplotlib.pyplot.subplots",
"numpy.arccos",
"numpy.sin",
"numpy.arctan2",
"numpy.tan",
"numpy.cos",
"numpy.ones",
"numpy.shape",
"numpy.any",
"numpy.array",
"numpy.zeros"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
d-v-dlee/super_liga_xg | [
"e3054cadf00755a347d31ce335c567db91e433a7"
] | [
"src/xlg.py"
] | [
"import pandas as pd\nfrom json_shot_scraper import flatten_shot, flatten_complete_pass, flatten_corner\nfrom dataframe_cleaner import (pass_to_shot, corner_to_shot, transpose_coordinates,\ncoord_to_yards, shot_distance_angle, drop_own_goals, goal_dummy, dummy_columns)\n\ndef game_to_single_df(game):\n \"\"\"\n This function calls upon 'flatten_shot', 'flatten_complete_pass', 'flatten_corner'\n from json_shot_scraper.py\n\n This function calls upon 'pass_to_shot', 'corner_to_shot', 'transpose_coordinates', 'coord_to_yards',\n 'shot_distance_angle', 'drop_own_goals', and 'goal_dummy' from dataframe_cleaner.py\n\n Parameters\n -----------------\n game: individual games(json data) from mongodb by db.games.find()\n\n Returns\n -----------------\n df_final: a dataframe that takes original input coordinate data and adds\n the columns 'shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt', 'is_goal'\n \"\"\"\n shots = list(game['incidences']['shots'].items())\n game_id = game['match']['matchId']\n shot_list_dicts = [flatten_shot(shot, game_id) for shot in shots]\n shot_df = pd.DataFrame(shot_list_dicts)\n \n completed_passes = list(game['incidences']['correctPasses'].items())\n completed_list_dicts = [flatten_complete_pass(apass, game_id) for apass in completed_passes]\n completed_passes_df = pd.DataFrame(completed_list_dicts)\n\n shot_pass_df = pass_to_shot(shot_df, completed_passes_df)\n\n corners = list(game['incidences']['cornerKicks'].items())\n if len(corners) > 0:\n corner_dicts = [flatten_corner(kick, game_id) for kick in corners]\n corner_df = pd.DataFrame(corner_dicts)\n\n shot_pass_corner = corner_to_shot(shot_pass_df, corner_df)\n transposed_df = transpose_coordinates(shot_pass_corner)\n else:\n transposed_df = transpose_coordinates(shot_pass_df)\n\n yard_df = coord_to_yards(transposed_df)\n\n shot_distance_df = shot_distance_angle(yard_df)\n\n df = dummy_columns(shot_distance_df)\n df_no_own = drop_own_goals(df)\n df_final = goal_dummy(df_no_own)\n return df_final\n\n\ndef create_frame():\n \"\"\"\n Parameters\n -----------------\n None\n\n Returns\n -----------------\n attach_to_df: empty dataframe to concat to\n \"\"\"\n attach_to_df = pd.DataFrame(columns=['game_id', 'player_id', 'shot_coord_x1', 'shot_coord_x2',\n 'shot_coord_y1', 'shot_coord_y2', 'shot_coord_z1', 'shot_coord_z2',\n 'shot_id', 'shot_type', 'team_id', 'time_of_event(min)',\n 'passed_from_id', 'pass_coord_x1', 'pass_coord_x2', 'pass_coord_y1',\n 'pass_coord_y2', 'pass_coord_z1', 'pass_coord_z2', 'corner_kick',\n 'shot_distance', 'shot_angle', 'assisted_shot', 'is_penalty_attempt',\n 'is_goal'])\n return attach_to_df\n\n\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
xdze2/trilobot | [
"bca16bf5ddc5f8977f8a71c6331100a51c909c1a"
] | [
"scripts/liveview.py"
] | [
"import time\n\nimport cv2\nimport numpy as np\nimport zmq\n\n# https://zguide.zeromq.org/docs/chapter2/#Pub-Sub-Message-Envelopes\n\n\ndef main():\n \"\"\"main method\"\"\"\n ip_address = \"192.168.1.72\"\n\n context = zmq.Context()\n subscriber = context.socket(zmq.SUB)\n subscriber.connect(f\"tcp://{ip_address}:5563\")\n subscriber.setsockopt(zmq.SUBSCRIBE, b\"cam\")\n\n while True:\n [_address, contents] = subscriber.recv_multipart()\n show(contents)\n\n # We never get here but clean up anyhow\n subscriber.close()\n context.term()\n\n\ncv2.namedWindow(\"liveview\")\n\n\ndef show(jpeg_frame):\n\n img = cv2.imdecode(\n np.frombuffer(jpeg_frame, dtype=np.uint8), cv2.IMREAD_UNCHANGED\n )\n\n cv2.imshow(\"liveview\", img)\n if cv2.waitKey(1) == ord(\"q\"):\n print(\"exit\")\n cv2.destroyAllWindows()\n exit()\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.frombuffer"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
tian961214/RFB_ESRGAN-PyTorch | [
"0104e8e9b172065e94ac07bf65d9bb9a1898425e"
] | [
"data/create_dataset_for_kernelGAN.py"
] | [
"# Copyright 2020 Dakewe Biotech Corporation. All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport argparse\nimport glob\nimport os\n\nimport numpy as np\nimport torch.utils.data\nimport torchvision.transforms as transforms\nfrom KernelGAN.imresize import imresize\nfrom PIL import Image\nfrom scipy.io import loadmat\nfrom tqdm import tqdm\n\nfrom rfb_esrgan_pytorch import check_image_file\nfrom rfb_esrgan_pytorch import utils\n\nparser = argparse.ArgumentParser(description=\"Using the image distribution algorithm learned in kernelgan to \"\n \"construct pairwise hyperspectral data.\")\nparser.add_argument(\"--input-dir\", type=str, required=True,\n help=\"Folder for low resolution images.\")\nparser.add_argument(\"--target-dir\", type=str, required=True,\n help=\"Folder for high resolution images.\")\nparser.add_argument(\"--cleanup-factor\", default=2, type=int,\n help=\"downscaling factor for image cleanup. (default: 2).\")\nparser.add_argument(\"--upscale-factor\", default=4, type=int,\n help=\"upscale factor for image. (default: 4).\")\nparser.add_argument(\"--kernel-dir\", default=\"./KernelGAN/results\", type=str,\n help=\"Using the fuzzy kernel Gan algorithm to extract the folder. 
(default: `./KernelGAN/results`)\")\nargs = parser.parse_args()\n\nlr_dir = f\"./{args.upscale_factor}x/input\"\nhr_dir = f\"./{args.upscale_factor}x/target\"\nlr_files = [os.path.join(args.input_dir, x) for x in os.listdir(args.input_dir) if check_image_file(x)]\nhr_files = [os.path.join(args.target_dir, x) for x in os.listdir(args.target_dir) if check_image_file(x)]\n\ntry:\n os.makedirs(lr_dir)\n os.makedirs(hr_dir)\nexcept OSError:\n pass\n\npil2tensor = transforms.ToTensor()\ntensor2pil = transforms.ToPILImage()\n\n# Get all kernelGAN distribute file\nkernel_paths = glob.glob(os.path.join(args.kernel_dir, f\"*/*_kernel_{args.upscale_factor}x.mat\"))\n\n\ndef process_for_lr():\n r\"\"\" The low resolution data set is preliminarily processed.\n \"\"\"\n for filename in tqdm(lr_files, desc=\"Generating images from lr dir\"):\n img = Image.open(filename)\n img = pil2tensor(img)\n\n # Remove noise\n img = utils.imresize(img, 1.0 / args.cleanup_factor, True)\n _, w, h = img.size()\n w = w - w % args.upscale_factor\n h = h - h % args.upscale_factor\n img = img[:, :w, :h]\n\n # Save high resolution img\n img = tensor2pil(img)\n img.save(os.path.join(hr_dir, os.path.basename(filename)), \"bmp\")\n\n # The noise distribution obtained in kernelGAN is used to adjust the image.\n kernel_path = kernel_paths[np.random.randint(0, len(kernel_paths))]\n mat = loadmat(kernel_path)\n k = np.array([mat[\"Kernel\"]]).squeeze()\n img = imresize(np.array(img), scale_factor=1.0 / args.upscale_factor, kernel=k)\n\n # Save low resolution img\n img = tensor2pil(img)\n img.save(os.path.join(lr_dir, os.path.basename(filename)), \"bmp\")\n\n\ndef process_for_hr():\n r\"\"\" The high resolution data set is preliminarily processed.\n \"\"\"\n for filename in tqdm(hr_files, desc=\"Generating images from hr dir\"):\n img = Image.open(filename)\n img = pil2tensor(img)\n\n # Save high resolution img\n img = tensor2pil(img)\n img.save(os.path.join(hr_dir, os.path.basename(filename)), \"bmp\")\n\n # The noise distribution obtained in kernelGAN is used to adjust the image.\n kernel_path = kernel_paths[np.random.randint(0, len(kernel_paths))]\n mat = loadmat(kernel_path)\n k = np.array([mat[\"Kernel\"]]).squeeze()\n img = imresize(np.array(img), scale_factor=1.0 / args.upscale_factor, kernel=k)\n\n # Save low resolution img\n img = tensor2pil(img)\n img.save(os.path.join(lr_dir, os.path.basename(filename)), \"bmp\")\n\n\nif __name__ == \"__main__\":\n with torch.no_grad():\n process_for_lr()\n process_for_hr()\n"
] | [
[
"numpy.array",
"scipy.io.loadmat"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
k-washi/ita_corpus_recorder | [
"29a551b1647247ef343d15859f47710a63eed9fc"
] | [
"src/record_frame.py"
] | [
"import os\nimport tkinter as tk\nfrom tkinter import ttk\nfrom src.load_corpus import load_corpus, split_corpus\nfrom src.utils import get_eval_index, get_recorded_index\n\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2Tk\nfrom matplotlib.figure import Figure\nimport numpy as np\nfrom src.audio.signal import AudioProcessing\nimport shutil\nclass RecordFrame(tk.Frame):\n def __init__(self, master, in_mic, out_mic, cnf):\n super().__init__(master) \n self.cnf = cnf\n self.in_mic = in_mic\n self.out_mic = out_mic\n \n # 一時保存用のpathを作成\n os.makedirs(self.cnf.audio.tmp_dir, exist_ok=True)\n self._tmp_audio_path = os.path.join(self.cnf.audio.tmp_dir, self.cnf.audio.tmp_wav)\n\n self.audio_proc = AudioProcessing(self.in_mic, self.out_mic, self.cnf)\n \n self.recorded_dir = os.path.join(self.cnf.path.record_dir, self.cnf.path.recorded_dir)\n \n self.now_record_selected = -1\n self.now_record_index = \"\"\n self.recording = False\n self.has_recorded_audio = False\n self.listening = False\n\n self.create_text_widgets()\n self.create_list_box()\n self.create_widgets()\n self.plot_canvas()\n \n def load_corpus(self):\n # コーパスの読み込み\n corpus1 = split_corpus(load_corpus(self.cnf.path.emotion_corpus))\n corpus2 = split_corpus(load_corpus(self.cnf.path.recitation_corpus))\n\n tmp_corpus = corpus1 + corpus2\n\n # 録音済みデータ、評価済みデータを除去\n recorded_index = get_recorded_index(self.cnf)\n # eval_index = get_eval_index(self.cnf)\n saved_index = recorded_index #+ eval_index\n\n corpus = {}\n for c in tmp_corpus:\n if c[0] in saved_index:\n continue\n corpus[c[0]] = c[1]\n print(len(corpus.keys()))\n return corpus\n \n def plot_canvas(self):\n fig = Figure(figsize=(3, 2)) #Figure\n fig.tight_layout()\n self.ax = fig.add_subplot(1, 1, 1) #Axes\n self.ax.axis('off')\n self.wav_canvas = Figure(figsize=(3, 3)) \n self.wav_canvas = FigureCanvasTkAgg(fig, self)\n self.wav_canvas.get_tk_widget().grid(row=0, column=7, rowspan=3, columnspan=3, padx=5, pady=10)\n\n\n def create_list_box(self):\n self.corpus_dic = self.load_corpus()\n\n listbox_init_corpus = tk.StringVar(value=list(self.corpus_dic.keys()))\n self.corpus_listbox = tk.Listbox(self, bd=5, width=15, relief=\"groove\", fg=\"black\",bg=\"white\", selectmode=\"single\", listvariable=listbox_init_corpus)\n\n self.corpus_listbox.grid(row=0, column=0, rowspan=3, columnspan=2, padx=5, pady=10)\n\n self.corpus_listbox.bind('<<ListboxSelect>>', self.listbox_active)\n\n \n self.active = False\n self.corpus_listbox.selection_clear(0, tk.END)\n self.corpus_listbox.selection_set(0)\n self.corpus_listbox.activate(0)\n self.text_update()\n\n\n\n def create_widgets(self):\n self.record_button = tk.Button(\n self,\n text=\"録音\",\n command=self.record_start\n )\n self.record_button.grid(row=0, column=3, pady=20)\n self.record_button.config(fg=\"gray\")\n\n\n\n self.stop_button = tk.Button(\n self,\n text=\"停止\",\n command=self.record_stop\n )\n self.stop_button.grid(row=0, column=4, pady=20)\n self.stop_button.config(fg=\"gray\")\n\n self.listen_button = tk.Button(\n self,\n text=\"聞く\",\n command=self.listen_audio\n )\n self.listen_button.grid(row=0, column=5, pady=20)\n self.listen_button.config(fg=\"gray\")\n\n self.publish_button = tk.Button(\n self,\n text=\"提出\",\n command=self.publish_audio\n )\n self.publish_button.grid(row=0, column=6, pady=20)\n self.publish_button.config(fg=\"gray\")\n \n\n def create_text_widgets(self):\n # text\n self.selected_var = tk.StringVar(self)\n self.selected_text = tk.Label(self, 
textvariable=self.selected_var, font=(\"\", 20))\n self.selected_var.set(\"選択してください。\")\n\n self.selected_des_var = tk.StringVar(self)\n self.selected_text_des = tk.Message(self, textvariable=self.selected_des_var, font=(\"\", 15), width=100)\n self.selected_des_var.set(\"***\")\n\n self.selected_text.grid(row=1, column=3, columnspan=4)\n self.selected_text_des.grid(row=2, column=3, columnspan=4, rowspan=2, sticky=tk.W+tk.S, padx=5, pady=5)\n\n def text_update(self):\n index = self.corpus_listbox.curselection()\n if len(index) == 0:\n return\n text = self.corpus_listbox.get(index)\n dis = self.corpus_dic.get(text, \"hogehoge\")\n self.selected_var.set(text)\n self.selected_des_var.set(dis) \n \n def listbox_active(self, context):\n self.active = True\n if not self.recording and not self.has_recorded_audio:\n self.text_update()\n self.record_button.config(fg=\"white\")\n \n def record_start(self):\n if self.active and not self.recording:\n \n self.record_button.config(fg=\"gray\")\n self.stop_button.config(fg=\"white\")\n self.listen_button.config(fg=\"gray\")\n self.publish_button.config(fg=\"gray\")\n\n self.recording = True\n self.listening = False\n self.audio_proc.record_start()\n \n\n if not self.has_recorded_audio:\n # 選択が反映される\n\n self.now_record_selected = self.corpus_listbox.curselection()\n self.now_record_index = self.corpus_listbox.get(self.now_record_selected)\n print(\"Record\")\n\n def record_stop(self): \n if self.active and self.recording and not self.listening: \n self.record_button.config(fg=\"white\")\n self.stop_button.config(fg=\"gray\")\n self.listen_button.config(fg=\"white\")\n self.publish_button.config(fg=\"white\")\n \n self.audio_proc.record_stop() \n ok, data = self.audio_proc.save(self._tmp_audio_path) #一時保存\n if not ok:\n print(\"recordはemptyです!\")\n else:\n self.plot_update(data)\n\n self.recording = False\n self.has_recorded_audio = True\n\n print(\"Stop\")\n \n def plot_update(self, data):\n \n x = np.arange(len(data)) / self.cnf.audio.sampling_rate\n data = np.array(data)\n wmax = np.max(np.abs(data))\n data = data / (wmax+0.2)\n self.ax.cla()\n self.line = self.ax.plot(x, data)\n self.ax.get_yaxis().set_visible(False)\n self.ax.set_xlim([0, x[-1]])\n self.ax.set_ylim([-1.1,1.1])\n #self.ax.axis('tight')\n \n \n self.wav_canvas.draw()\n \n def listen_audio(self):\n if self.active and not self.recording and self.has_recorded_audio:\n self.listening = True\n self.audio_proc.open_and_listen(self._tmp_audio_path)\n print(\"聞く。\")\n \n \n \n \n\n def publish_audio(self):\n if self.active and not self.recording and self.has_recorded_audio:\n audio_path = os.path.join(self.recorded_dir, self.now_record_index+\".wav\")\n shutil.move(\n self._tmp_audio_path,\n audio_path\n )\n\n self.record_button.config(fg=\"white\")\n self.stop_button.config(fg=\"gray\")\n self.listen_button.config(fg=\"gray\")\n self.publish_button.config(fg=\"gray\")\n self.listening = False\n self.has_recorded_audio = False\n self.create_list_box()\n\n print(\"提出しました。\")\n\n\n\n\n\n\n "
] | [
[
"numpy.array",
"matplotlib.backends.backend_tkagg.FigureCanvasTkAgg",
"numpy.abs",
"matplotlib.figure.Figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
opensafely/hiv-research | [
"380915bf5dbf4e46f2f9616d7e86211d06423b69"
] | [
"analysis/cohortextractor2/cohortextractor.py"
] | [
"#!/usr/bin/env python3\n\n\"\"\"A cross-platform script to build cohorts, run models, build and\nstart a notebook, open a web browser on the correct port, and handle\nshutdowns gracefully\n\"\"\"\nimport cohortextractor\nimport glob\nimport importlib\nimport os\nimport re\nimport requests\nimport shutil\nimport sys\n\n\nimport base64\nfrom io import BytesIO\nfrom argparse import ArgumentParser\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom pandas.api.types import is_categorical_dtype\nfrom pandas.api.types import is_bool_dtype\nfrom pandas.api.types import is_datetime64_dtype\nfrom pandas.api.types import is_numeric_dtype\nimport yaml\n\nfrom datetime import datetime\nimport seaborn as sns\n\nfrom cohortextractor.remotejobs import get_job_logs\nfrom cohortextractor.remotejobs import submit_job\n\nnotebook_tag = \"opencorona-research\"\ntarget_dir = \"/home/app/notebook\"\n\n\ndef relative_dir():\n return os.getcwd()\n\n\ndef make_chart(name, series, dtype):\n FLOOR_DATE = datetime(1960, 1, 1)\n CEILING_DATE = datetime.today()\n img = BytesIO()\n # Setting figure sizes in seaborn is a bit weird:\n # https://stackoverflow.com/a/23973562/559140\n if is_categorical_dtype(dtype):\n sns.set_style(\"ticks\")\n sns.catplot(\n x=name, data=series.to_frame(), kind=\"count\", height=3, aspect=3 / 2\n )\n plt.xticks(rotation=45)\n elif is_bool_dtype(dtype):\n sns.set_style(\"ticks\")\n sns.catplot(x=name, data=series.to_frame(), kind=\"count\", height=2, aspect=1)\n plt.xticks(rotation=45)\n elif is_datetime64_dtype(dtype):\n # Early dates are dummy values; I don't know what late dates\n # are but presumably just dud data\n series = series[(series > FLOOR_DATE) & (series <= CEILING_DATE)]\n # Set bin numbers appropriate to the time window\n delta = series.max() - series.min()\n if delta.days <= 31:\n bins = delta.days\n elif delta.days <= 365 * 10:\n bins = delta.days / 31\n else:\n bins = delta.days / 365\n if bins < 1:\n bins = 1\n fig = plt.figure(figsize=(5, 2))\n ax = fig.add_subplot(111)\n series.hist(bins=int(bins), ax=ax)\n plt.xticks(rotation=45, ha=\"right\")\n elif is_numeric_dtype(dtype):\n # Trim percentiles and negatives which are usually bad data\n series = series.fillna(0)\n series = series[\n (series < np.percentile(series, 95))\n & (series > np.percentile(series, 5))\n & (series > 0)\n ]\n fig = plt.figure(figsize=(5, 2))\n ax = fig.add_subplot(111)\n sns.distplot(series, kde=False, ax=ax)\n plt.xticks(rotation=45)\n else:\n raise ValueError()\n\n plt.savefig(img, transparent=True, bbox_inches=\"tight\")\n img.seek(0)\n plt.close()\n return base64.b64encode(img.read()).decode(\"UTF-8\")\n\n\ndef preflight_generation_check():\n \"\"\"Raise an informative error if things are not as they should be\n \"\"\"\n missing_paths = []\n required_paths = [\"codelists/\", \"analysis/\"]\n for p in required_paths:\n if not os.path.exists(p):\n missing_paths.append(p)\n if missing_paths:\n msg = \"This command expects the following relative paths to exist: {}\"\n raise RuntimeError(msg.format(\", \".join(missing_paths)))\n\n\ndef generate_cohort(output_dir, expectations_population):\n preflight_generation_check()\n for study_name, suffix in list_study_definitions():\n print(f\"Generating cohort for {study_name}...\")\n _generate_cohort(output_dir, study_name, suffix, expectations_population)\n\n\ndef _generate_cohort(output_dir, study_name, suffix, expectations_population):\n print(\"Running. 
Please wait...\")\n study = load_study_definition(study_name)\n\n with_sqlcmd = shutil.which(\"sqlcmd\") is not None\n os.makedirs(output_dir, exist_ok=True)\n study.to_csv(\n f\"{output_dir}/input{suffix}.csv\",\n expectations_population=expectations_population,\n with_sqlcmd=with_sqlcmd,\n )\n print(\n f\"Successfully created cohort and covariates at {output_dir}/input{suffix}.csv\"\n )\n\n\ndef make_cohort_report(input_dir, output_dir):\n for study_name, suffix in list_study_definitions():\n _make_cohort_report(input_dir, output_dir, study_name, suffix)\n\n\ndef _make_cohort_report(input_dir, output_dir, study_name, suffix):\n study = load_study_definition(study_name)\n\n df = study.csv_to_df(f\"{input_dir}/input{suffix}.csv\")\n descriptives = df.describe(include=\"all\")\n\n for name, dtype in zip(df.columns, df.dtypes):\n if name == \"patient_id\":\n continue\n main_chart = '<div><img src=\"data:image/png;base64,{}\"/></div>'.format(\n make_chart(name, df[name], dtype)\n )\n empty_values_chart = \"\"\n if is_datetime64_dtype(dtype):\n # also do a null / not null plot\n empty_values_chart = '<div><img src=\"data:image/png;base64,{}\"/></div>'.format(\n make_chart(name, df[name].isnull(), bool)\n )\n elif is_numeric_dtype(dtype):\n # also do a null / not null plot\n empty_values_chart = '<div><img src=\"data:image/png;base64,{}\"/></div>'.format(\n make_chart(name, df[name] > 0, bool)\n )\n descriptives.loc[\"values\", name] = main_chart\n descriptives.loc[\"nulls\", name] = empty_values_chart\n\n with open(f\"{output_dir}/descriptives{suffix}.html\", \"w\") as f:\n\n f.write(\n \"\"\"<html>\n<head>\n <style>\n table {\n text-align: left;\n position: relative;\n border-collapse: collapse;\n }\n td, th {\n padding: 8px;\n margin: 2px;\n }\n td {\n border-left: solid 1px black;\n }\n tr:nth-child(even) {background: #EEE}\n tr:nth-child(odd) {background: #FFF}\n tbody th:first-child {\n position: sticky;\n left: 0px;\n background: #fff;\n }\n </style>\n</head>\n<body>\"\"\"\n )\n\n f.write(descriptives.to_html(escape=False, na_rep=\"\", justify=\"left\", border=0))\n f.write(\"</body></html>\")\n print(f\"Created cohort report at {output_dir}/descriptives{suffix}.html\")\n\n\ndef update_codelists():\n base_path = os.path.join(os.getcwd(), \"codelists\")\n\n # delete all existing codelists\n for path in glob.glob(os.path.join(base_path, \"*.csv\")):\n os.unlink(path)\n\n with open(os.path.join(base_path, \"codelists.txt\")) as f:\n for line in f:\n line = line.strip()\n if not line:\n continue\n\n print(line)\n project_id, codelist_id, version = line.split(\"/\")\n url = f\"https://codelists.opensafely.org/codelist/{project_id}/{codelist_id}/{version}/download.csv\"\n\n rsp = requests.get(url)\n rsp.raise_for_status()\n\n with open(\n os.path.join(base_path, f\"{project_id}-{codelist_id}.csv\"), \"w\"\n ) as f:\n f.write(rsp.text)\n\n\ndef dump_cohort_sql(study_definition):\n study = load_study_definition(study_definition)\n print(study.to_sql())\n\n\ndef dump_study_yaml(study_definition):\n study = load_study_definition(study_definition)\n print(yaml.dump(study.to_data()))\n\n\ndef load_study_definition(name):\n sys.path.extend([relative_dir(), os.path.join(relative_dir(), \"analysis\")])\n # Avoid creating __pycache__ files in the analysis directory\n sys.dont_write_bytecode = True\n return importlib.import_module(name).study\n\n\ndef list_study_definitions():\n pattern = re.compile(r\"^(study_definition(_\\w+)?)\\.py$\")\n matches = []\n for name in 
sorted(os.listdir(os.path.join(relative_dir(), \"analysis\"))):\n match = pattern.match(name)\n if match:\n name = match.group(1)\n suffix = match.group(2) or \"\"\n matches.append((name, suffix))\n if not matches:\n raise RuntimeError(f\"No study definitions found in {relative_dir()}\")\n return matches\n\n\ndef main():\n parser = ArgumentParser(\n description=\"Generate cohorts and run models in openSAFELY framework. \"\n )\n # Cohort parser options\n parser.add_argument(\"--version\", help=\"Display version\", action=\"store_true\")\n subparsers = parser.add_subparsers(help=\"sub-command help\")\n generate_cohort_parser = subparsers.add_parser(\n \"generate_cohort\", help=\"Generate cohort\"\n )\n generate_cohort_parser.set_defaults(which=\"generate_cohort\")\n cohort_report_parser = subparsers.add_parser(\n \"cohort_report\", help=\"Generate cohort report\"\n )\n cohort_report_parser.set_defaults(which=\"cohort_report\")\n cohort_report_parser.add_argument(\n \"--input-dir\",\n help=\"Location to look for input CSVs\",\n type=str,\n default=\"analysis\",\n )\n cohort_report_parser.add_argument(\n \"--output-dir\",\n help=\"Location to store output CSVs\",\n type=str,\n default=\"output\",\n )\n\n run_notebook_parser = subparsers.add_parser(\"notebook\", help=\"Run notebook\")\n run_notebook_parser.set_defaults(which=\"notebook\")\n update_codelists_parser = subparsers.add_parser(\n \"update_codelists\",\n help=\"Update codelists, using specification at codelists/codelists.txt\",\n )\n update_codelists_parser.set_defaults(which=\"update_codelists\")\n dump_cohort_sql_parser = subparsers.add_parser(\n \"dump_cohort_sql\", help=\"Show SQL to generate cohort\"\n )\n dump_cohort_sql_parser.add_argument(\n \"--study-definition\", help=\"Study definition name\", type=str, required=True\n )\n dump_cohort_sql_parser.set_defaults(which=\"dump_cohort_sql\")\n dump_study_yaml_parser = subparsers.add_parser(\n \"dump_study_yaml\", help=\"Show study definition as YAML\"\n )\n dump_study_yaml_parser.set_defaults(which=\"dump_study_yaml\")\n dump_study_yaml_parser.add_argument(\n \"--study-definition\", help=\"Study definition name\", type=str, required=True\n )\n\n remote_parser = subparsers.add_parser(\"remote\", help=\"Manage remote jobs\")\n remote_parser.set_defaults(which=\"remote\")\n\n # Remote subcommands\n remote_subparser = remote_parser.add_subparsers(help=\"Remote sub-command help\")\n generate_cohort_remote_parser = remote_subparser.add_parser(\n \"generate_cohort\", help=\"Generate cohort\"\n )\n generate_cohort_remote_parser.set_defaults(which=\"remote_generate_cohort\")\n generate_cohort_remote_parser.add_argument(\n \"--ref\",\n help=\"Tag or branch against which to run the extraction\",\n type=str,\n required=True,\n )\n generate_cohort_remote_parser.add_argument(\n \"--repo\",\n help=\"Tag or branch against which to run the extraction (leave blank for current repo)\",\n type=str,\n )\n generate_cohort_remote_parser.add_argument(\n \"--db\",\n help=\"Database to run against\",\n choices=[\"full\", \"slice\", \"dummy\"],\n nargs=\"?\",\n const=\"full\",\n default=\"full\",\n type=str,\n )\n generate_cohort_remote_parser.add_argument(\n \"--backend\",\n help=\"Backend to run against\",\n choices=[\"all\", \"tpp\"],\n nargs=\"?\",\n const=\"all\",\n default=\"all\",\n type=str,\n )\n\n log_remote_parser = remote_subparser.add_parser(\"log\", help=\"Show logs\")\n log_remote_parser.set_defaults(which=\"remote_log\")\n\n # Cohort parser options\n generate_cohort_parser.add_argument(\n 
\"--output-dir\",\n help=\"Location to store output CSVs\",\n type=str,\n default=\"output\",\n )\n cohort_method_group = generate_cohort_parser.add_mutually_exclusive_group(\n required=True\n )\n cohort_method_group.add_argument(\n \"--expectations-population\",\n help=\"Generate a dataframe from study expectations\",\n type=int,\n default=0,\n )\n cohort_method_group.add_argument(\n \"--database-url\",\n help=\"Database URL to query\",\n type=str,\n default=os.environ.get(\"DATABASE_URL\", \"\"),\n )\n\n options = parser.parse_args()\n if options.version:\n print(f\"v{cohortextractor.__version__}\")\n elif not hasattr(options, \"which\"):\n parser.print_help()\n elif options.which == \"generate_cohort\":\n os.environ[\"DATABASE_URL\"] = options.database_url\n generate_cohort(options.output_dir, options.expectations_population)\n elif options.which == \"cohort_report\":\n make_cohort_report(options.input_dir, options.output_dir)\n elif options.which == \"update_codelists\":\n update_codelists()\n print(\"Codelists updated. Don't forget to commit them to the repo\")\n elif options.which == \"dump_cohort_sql\":\n dump_cohort_sql(options.study_definition)\n elif options.which == \"dump_study_yaml\":\n dump_study_yaml(options.study_definition)\n elif options.which == \"remote_generate_cohort\":\n submit_job(\n options.backend, options.db, options.ref, \"generate_cohort\", options.repo\n )\n print(\"Job submitted!\")\n elif options.which == \"remote_log\":\n logs = get_job_logs()\n print(\"\\n\".join(logs))\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"pandas.api.types.is_categorical_dtype",
"pandas.api.types.is_datetime64_dtype",
"matplotlib.pyplot.savefig",
"pandas.api.types.is_numeric_dtype",
"numpy.percentile",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xticks",
"pandas.api.types.is_bool_dtype",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
aviolante/python-dlpy | [
"9bf8cc4ffd5ae235e377004644ef70398431e09c"
] | [
"dlpy/model_conversion/write_keras_model_parm.py"
] | [
"#!/usr/bin/env python\n# encoding: utf-8\n#\n# Copyright SAS Institute\n#\n# Licensed under the Apache License, Version 2.0 (the License);\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\n''' Supporting functions for keras model conversion '''\n\nimport sys\nimport h5py\nimport numpy as np\nfrom keras import backend as K\ntry:\n from keras.engine.topology import preprocess_weights_for_loading\nexcept ImportError:\n from keras.engine.saving import preprocess_weights_for_loading\n\n# let Keras read parameters and then transform to format needed for SAS deep learning\n# NOTE: modified version of Keras function load_weights_from_hdf5_group()\ndef write_keras_hdf5_from_file(model, hdf5_in, hdf5_out):\n '''\n Generate an HDF5 file with trained model parameters given a Keras definition\n\n Parameters\n ----------\n model : Keras model\n Keras deep learning model\n hdf5_in : string\n Fully qualified file name of Keras HDF5 file\n hdf5_out : string\n Fully qualified file name of SAS-compatible HDF5 file\n\n '''\n # open input/output files\n f_in = h5py.File(hdf5_in, 'r')\n f_out = h5py.File(hdf5_out, 'w')\n\n if 'keras_version' in f_in.attrs:\n original_keras_version = f_in.attrs['keras_version'].decode('utf8')\n else:\n original_keras_version = '1'\n\n if 'backend' in f_in.attrs:\n original_backend = f_in.attrs['backend'].decode('utf8')\n else:\n original_backend = None\n\n try:\n image_data_format = K.image_data_format()\n\n # determine layers with weights\n filtered_layers = []\n for layer in model.layers:\n weights = layer.weights\n if weights:\n filtered_layers.append(layer)\n\n layer_names = [n.decode('utf8') for n in f_in.attrs['layer_names']]\n filtered_layer_names = []\n for name in layer_names:\n g = f_in[name]\n weight_names = [n.decode('utf8') for n in g.attrs['weight_names']]\n if weight_names:\n filtered_layer_names.append(name)\n\n layer_names = filtered_layer_names\n if len(layer_names) != len(filtered_layers):\n raise ValueError('You are trying to load a weight file '\n 'containing ' + str(len(layer_names)) +\n ' layers into a model with ' +\n str(len(filtered_layers)) + ' layers.')\n\n # determine permutation vector associated with flattening layer (if it exists)\n flatten_layer_index = -1\n index = 0\n for layer in model.layers:\n if (layer.__class__.__name__.lower() == 'flatten'):\n flatten_layer_index = index\n break\n index = index + 1\n\n if (flatten_layer_index != -1):\n layer = model.layers[flatten_layer_index]\n permute_layer_name = model.layers[flatten_layer_index + 1].name\n if (image_data_format == 'channels_first'):\n C, H, W = (layer.input_shape)[1:]\n else:\n H, W, C = (layer.input_shape)[1:]\n N = (layer.output_shape)[1]\n perm_index = [0] * N\n if (image_data_format == 'channels_last'):\n ii = 0\n for cc in range(C):\n for hh in range(H):\n for ww in range(W):\n perm_index[ii] = hh * W * C + ww * C + cc\n ii = ii + 1\n else:\n for nn in range(N):\n perm_index[nn] = nn\n else:\n perm_index = []\n\n f_out.attrs['layer_names'] = [l.encode('utf8') for l in layer_names]\n # let Keras read weights, reformat, and write to SAS-compatible 
file\n for k, name in enumerate(layer_names):\n g_in = f_in[name]\n g_out = f_out.create_group(name)\n new_weight_names = []\n\n weight_names = [n.decode('utf8') for n in g_in.attrs['weight_names']]\n weight_values = [g_in[weight_name] for weight_name in weight_names]\n layer = filtered_layers[k]\n symbolic_weights = layer.weights\n weight_values = preprocess_weights_for_loading(layer,\n weight_values,\n original_keras_version,\n original_backend)\n if len(weight_values) != len(symbolic_weights):\n raise ValueError('Layer #' + str(k) +\n ' (named \"' + layer.name +\n '\" in the current model) was found to '\n 'correspond to layer ' + name +\n ' in the saved file. '\n 'However the new layer ' + layer.name +\n ' expects ' + str(len(symbolic_weights)) +\n ' weights, but the saved weights have ' +\n str(len(weight_values)) +\n ' elements.')\n if layer.__class__.__name__.lower() == 'batchnormalization':\n bn_gamma = np.ones(weight_values[0].shape,\n dtype=weight_values[0].dtype)\n bn_beta = np.zeros(weight_values[0].shape,\n dtype=weight_values[0].dtype)\n # if scale = False and center = True\n if not layer.get_config()['scale'] and layer.get_config()['center']:\n weight_values.insert(0, bn_gamma)\n weight_names.insert(0, layer.name+'/'+'gamma:0')\n # if scale = True and center = False\n elif layer.get_config()['scale'] and not layer.get_config()['center']:\n weight_values.insert(1, bn_beta)\n weight_names.insert(1, layer.name+'/'+'beta:0')\n # if scale = False and center = False\n elif not layer.get_config()['scale'] and not layer.get_config()['center']:\n weight_values = [bn_gamma, bn_beta] + weight_values\n weight_names = [layer.name+'/'+'gamma:0', \n layer.name+'/'+'beta:0'] + weight_names\n # read/write weights\n for ii in range(len(weight_names)):\n if type(weight_values[ii]) == np.ndarray:\n tensor_in = weight_values[ii]\n else:\n tensor_in = np.zeros(weight_values[ii].shape,\n dtype=weight_values[ii].dtype)\n weight_values[ii].read_direct(tensor_in)\n\n # permute axes as needed to conform to SAS deep\n # learning \"channels first\" format\n if ((image_data_format == 'channels_first') or (not perm_index)):\n # format: (C,fdim1, fdim2, fdim3) ==> (C,fdim3,fdim1,fdim2)\n if (len(tensor_in.shape) == 4):\n tensor_out = np.transpose(tensor_in, (0, 3, 1, 2))\n else:\n tensor_out = tensor_in.copy()\n else:\n # \"channels last\" format\n # this is a vector - nothing to permute\n if (len(tensor_in.shape) == 1):\n tensor_out = tensor_in.copy()\n else:\n # permute Conv2D tensor to \"channels_first\" format\n if (layer.__class__.__name__ == 'Conv2D'):\n tensor_out = np.transpose(tensor_in, (3, 2, 0, 1))\n # have to account for neuron ordering in first dense\n # layer following flattening operation\n elif (layer.__class__.__name__ == 'Dense'):\n if (layer.name == permute_layer_name):\n tensor_out = np.zeros(tensor_in.shape)\n for jj in range(tensor_out.shape[0]):\n tensor_out[jj, :] = tensor_in[perm_index[jj], :]\n else: # not following flattening, just copy\n tensor_out = tensor_in.copy()\n\n # mimic Caffe layout\n tensor_out = np.transpose(tensor_out, (1, 0))\n\n # save weight in format amenable to SAS\n dset_name = generate_dataset_name(layer, ii)\n new_weight_names.append(dset_name)\n g_out.create_dataset(dset_name, data=tensor_out)\n\n # update weight names\n g_out.attrs['weight_names'] = new_weight_names\n\n except ValueError as err_msg:\n print(err_msg)\n\n finally:\n # close files\n f_out.close()\n f_in.close()\n\n # generate dataset name with template 
layerName/layerName/weightName:0\n\n\ndef write_keras_hdf5(model, hdf5_out):\n '''\n Generate an HDF5 file with trained model parameters given a Keras definition\n\n Parameters\n ----------\n model : Keras model\n Keras deep learning model\n hdf5_out : string\n Fully qualified file name of SAS-compatible HDF5 file\n\n '''\n # open output file\n f_out = h5py.File(hdf5_out, 'w')\n\n try:\n image_data_format = K.image_data_format()\n\n # determine layers with weights\n filtered_layers = []\n for layer in model.layers:\n weights = layer.weights\n if weights:\n filtered_layers.append(layer)\n\n # determine permutation vector associated with flattening layer (if it exists)\n flatten_layer_index = -1\n index = 0\n for layer in model.layers:\n if (layer.__class__.__name__.lower() == 'flatten'):\n flatten_layer_index = index\n break\n index = index + 1\n\n if (flatten_layer_index != -1):\n layer = model.layers[flatten_layer_index]\n permute_layer_name = model.layers[flatten_layer_index + 1].name\n if (image_data_format == 'channels_first'):\n C, H, W = (layer.input_shape)[1:]\n else:\n H, W, C = (layer.input_shape)[1:]\n N = (layer.output_shape)[1]\n perm_index = [0] * N\n if (image_data_format == 'channels_last'):\n ii = 0\n for cc in range(C):\n for hh in range(H):\n for ww in range(W):\n perm_index[ii] = hh * W * C + ww * C + cc\n ii = ii + 1\n else:\n for nn in range(N):\n perm_index[nn] = nn\n else:\n perm_index = []\n\n # let Keras read weights, reformat, and write to SAS-compatible file\n for k, layer in enumerate(filtered_layers):\n # g_in = f_in[name]\n g_out = f_out.create_group(layer.name)\n symbolic_weights = layer.weights\n weight_values = K.batch_get_value(symbolic_weights)\n weight_names = []\n for i, (w, val) in enumerate(zip(symbolic_weights, weight_values)):\n if hasattr(w, 'name') and w.name:\n name = str(w.name)\n else:\n name = 'param_' + str(i)\n weight_names.append(name.encode('utf8'))\n\n # layer modification from here:\n new_weight_names = []\n\n if len(weight_values) != len(symbolic_weights):\n raise ValueError('Layer #' + str(k) +\n ' (named \"' + layer.name +\n '\" in the current model) was found to '\n 'correspond to layer ' + name +\n ' in the saved file. 
'\n 'However the new layer ' + layer.name +\n ' expects ' + str(len(symbolic_weights)) +\n ' weights, but the saved weights have ' +\n str(len(weight_values)) +\n ' elements.')\n # read/write weights\n for ii in range(len(weight_names)):\n tensor_in = weight_values[ii]\n\n # permute axes as needed to conform to SAS deep\n # learning \"channels first\" format\n if ((image_data_format == 'channels_first') or (not perm_index)):\n # format: (C,fdim1, fdim2, fdim3) ==> (C,fdim3,fdim1,fdim2)\n if (len(tensor_in.shape) == 4):\n tensor_out = np.transpose(tensor_in, (0, 3, 1, 2))\n else:\n tensor_out = tensor_in.copy()\n else:\n # \"channels last\" format\n # this is a vector - nothing to permute\n if (len(tensor_in.shape) == 1):\n tensor_out = tensor_in.copy()\n else:\n # permute Conv2D tensor to \"channels_first\" format\n if (layer.__class__.__name__ == 'Conv2D'):\n tensor_out = np.transpose(tensor_in, (3, 2, 0, 1))\n # have to account for neuron ordering in first dense\n # layer following flattening operation\n elif (layer.__class__.__name__ == 'Dense'):\n if (layer.name == permute_layer_name):\n tensor_out = np.zeros(tensor_in.shape)\n for jj in range(tensor_out.shape[0]):\n tensor_out[jj, :] = tensor_in[perm_index[jj], :]\n else: # not following flattening, just copy\n tensor_out = tensor_in.copy()\n\n # mimic Caffe layout\n tensor_out = np.transpose(tensor_out, (1, 0))\n\n # save weight in format amenable to SAS\n dset_name = generate_dataset_name(layer, ii)\n new_weight_names.append(dset_name)\n\n g_out.create_dataset(dset_name, data=tensor_out)\n\n # update weight names\n g_out.attrs['weight_names'] = new_weight_names\n\n except ValueError as err_msg:\n print(err_msg)\n\n finally:\n # close files\n f_out.close()\n # generate dataset name with template layerName/layerName/weightName:0\n\n\ndef generate_dataset_name(layer, index):\n '''\n Generate data set names consistent with names generated by Keras models\n\n Parameters\n ----------\n layer : Layer\n Current layer definition\n index : int\n Data set index\n\n Returns\n -------\n UTF-8 encoded data set name\n\n '''\n layer_class_name = layer.__class__.__name__.lower()\n if (layer_class_name in ['conv2d', 'dense']):\n template_names = ['kernel:0', 'bias:0']\n elif (layer_class_name == 'batchnormalization'):\n template_names = ['gamma:0', 'beta:0', 'moving_mean:0', 'moving_variance:0']\n else:\n raise ValueError('Unable to translate layer weight name for layer = ' + layer.name)\n\n dataset_name = layer.name + '/' + template_names[index]\n return dataset_name.encode('utf8')\n"
] | [
[
"numpy.zeros",
"numpy.transpose",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Akegarasu/auto-derby | [
"e237be7cbbf98d1fc1a9b85b928545977e981a17"
] | [
"auto_derby/single_mode/race/game_data.py"
] | [
"# pyright: strict\n# -*- coding=UTF-8 -*-\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING, Iterator, Text, Tuple\n\nif TYPE_CHECKING:\n from ..context import Context\n\nimport json\nimport logging\nimport os\nimport warnings\n\nimport cast_unknown as cast\nimport cv2\nimport numpy as np\nimport PIL.Image\nimport PIL.ImageOps\n\nfrom ... import imagetools, mathtools, ocr, template, templates\nfrom .globals import g\nfrom .race import Race\n\nLOGGER = logging.getLogger(__name__)\n\n\nclass _g:\n loaded_data_path = \"\"\n\n\ndef _iter_races():\n with open(g.data_path, \"r\", encoding=\"utf-8\") as f:\n for line in f:\n yield Race.new().from_dict(json.loads(line))\n\n\ndef _load_legacy_json():\n warnings.warn(\n \"json race data support will be removed at next major version, use jsonl instead\",\n DeprecationWarning,\n )\n with open(g.data_path, \"r\", encoding=\"utf-8\") as f:\n g.races = tuple(Race.new().from_dict(i) for i in json.load(f))\n\n\ndef reload() -> None:\n if g.data_path.endswith(\".json\"):\n _load_legacy_json()\n return\n g.races = tuple(_iter_races())\n _g.loaded_data_path = g.data_path\n\n\ndef reload_on_demand() -> None:\n if _g.loaded_data_path != g.data_path:\n reload()\n\n\ndef find_by_date(date: Tuple[int, int, int]) -> Iterator[Race]:\n reload_on_demand()\n year, month, half = date\n for i in g.races:\n if year not in i.years:\n continue\n if date == (1, 0, 0) and i.grade != Race.GRADE_DEBUT:\n continue\n if (month, half) not in ((i.month, i.half), (0, 0)):\n continue\n yield i\n\n\ndef find(ctx: Context) -> Iterator[Race]:\n if ctx.date[1:] == (0, 0):\n return\n for i in find_by_date(ctx.date):\n if i.grade == Race.GRADE_NOT_WINNING and (\n ctx.is_after_winning or ctx.fan_count == 1\n ):\n continue\n if i.grade < Race.GRADE_NOT_WINNING and not ctx.is_after_winning:\n continue\n if ctx.fan_count < i.min_fan_count:\n continue\n # target race should be excluded when finding available race\n if i.is_target_race(ctx):\n continue\n yield i\n\n\ndef _recognize_fan_count(img: PIL.Image.Image) -> int:\n cv_img = imagetools.cv_image(imagetools.resize(img.convert(\"L\"), height=32))\n cv_img = imagetools.level(\n cv_img, np.percentile(cv_img, 1), np.percentile(cv_img, 90)\n )\n _, binary_img = cv2.threshold(cv_img, 60, 255, cv2.THRESH_BINARY_INV)\n if os.getenv(\"DEBUG\") == __name__:\n cv2.imshow(\"cv_img\", cv_img)\n cv2.imshow(\"binary_img\", binary_img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n text = ocr.text(imagetools.pil_image(binary_img))\n return int(text.rstrip(\"人\").replace(\",\", \"\"))\n\n\ndef _recognize_spec(img: PIL.Image.Image) -> Tuple[Text, int, int, int, int]:\n cv_img = imagetools.cv_image(imagetools.resize(img.convert(\"L\"), height=32))\n cv_img = imagetools.level(\n cv_img, np.percentile(cv_img, 1), np.percentile(cv_img, 90)\n )\n _, binary_img = cv2.threshold(cv_img, 60, 255, cv2.THRESH_BINARY_INV)\n if os.getenv(\"DEBUG\") == __name__:\n cv2.imshow(\"cv_img\", cv_img)\n cv2.imshow(\"binary_img\", binary_img)\n cv2.waitKey()\n cv2.destroyAllWindows()\n text = ocr.text(imagetools.pil_image(binary_img))\n stadium, text = text[:2], text[2:]\n if text[0] == \"芝\":\n text = text[1:]\n ground = Race.GROUND_TURF\n elif text[0] == \"ダ\":\n text = text[3:]\n ground = Race.GROUND_DART\n else:\n raise ValueError(\"_recognize_spec: invalid spec: %s\", text)\n\n distance, text = int(text[:4]), text[10:]\n\n turn, track = {\n \"左·内\": (Race.TURN_LEFT, Race.TRACK_IN),\n \"右·内\": (Race.TURN_RIGHT, Race.TRACK_IN),\n \"左\": (Race.TURN_LEFT, 
Race.TRACK_MIDDLE),\n \"右\": (Race.TURN_RIGHT, Race.TRACK_MIDDLE),\n \"左·外\": (Race.TURN_LEFT, Race.TRACK_OUT),\n \"右·外\": (Race.TURN_RIGHT, Race.TRACK_OUT),\n \"直線\": (Race.TURN_NONE, Race.TRACK_MIDDLE),\n \"右·外→内\": (Race.TURN_RIGHT, Race.TRACE_OUT_TO_IN),\n }[text]\n\n return stadium, ground, distance, turn, track\n\n\ndef _recognize_grade(rgb_color: Tuple[int, ...]) -> Tuple[int, ...]:\n if imagetools.compare_color((247, 209, 41), rgb_color) > 0.9:\n # EX(URA)\n return (Race.GRADE_G1,)\n if imagetools.compare_color((54, 133, 228), rgb_color) > 0.8:\n return (Race.GRADE_G1,)\n if imagetools.compare_color((244, 85, 129), rgb_color) > 0.8:\n return (Race.GRADE_G2,)\n if imagetools.compare_color((57, 187, 85), rgb_color) > 0.8:\n return (Race.GRADE_G3,)\n if imagetools.compare_color((252, 169, 5), rgb_color) > 0.8:\n return Race.GRADE_OP, Race.GRADE_PRE_OP\n if imagetools.compare_color((148, 203, 8), rgb_color) > 0.8:\n return Race.GRADE_DEBUT, Race.GRADE_NOT_WINNING\n raise ValueError(\"_recognize_grade: unknown grade color: %s\" % (rgb_color,))\n\n\ndef _find_by_spec(\n date: Tuple[int, int, int],\n stadium: Text,\n ground: int,\n distance: int,\n turn: int,\n track: int,\n no1_fan_count: int,\n grades: Tuple[int, ...],\n):\n full_spec = (stadium, ground, distance, turn, track, no1_fan_count)\n for i in find_by_date(date):\n if i.grade not in grades:\n continue\n if full_spec == (\n i.stadium,\n i.ground,\n i.distance,\n i.turn,\n i.track,\n i.fan_counts[0],\n ):\n yield i\n\n\ndef find_by_race_detail_image(ctx: Context, screenshot: PIL.Image.Image) -> Race:\n rp = mathtools.ResizeProxy(screenshot.width)\n\n grade_color_pos = rp.vector2((10, 75), 466)\n spec_bbox = rp.vector4((27, 260, 302, 279), 466)\n _, no1_fan_count_pos = next(\n template.match(screenshot, templates.SINGLE_MODE_RACE_DETAIL_NO1_FAN_COUNT)\n )\n no1_fan_count_bbox = (\n rp.vector(150, 466),\n no1_fan_count_pos[1],\n rp.vector(400, 466),\n no1_fan_count_pos[1] + rp.vector(18, 466),\n )\n\n grades = _recognize_grade(\n tuple(cast.list_(screenshot.getpixel(grade_color_pos), int))\n )\n stadium, ground, distance, turn, track = _recognize_spec(screenshot.crop(spec_bbox))\n no1_fan_count = _recognize_fan_count(screenshot.crop(no1_fan_count_bbox))\n\n full_spec = (\n ctx.date,\n stadium,\n ground,\n distance,\n turn,\n track,\n no1_fan_count,\n grades,\n )\n for i in _find_by_spec(*full_spec):\n LOGGER.info(\"image match: %s\", i)\n return i\n\n raise ValueError(\"find_by_race_details_image: no race match spec: %s\", full_spec)\n\n\ndef _find_by_race_menu_item(ctx: Context, img: PIL.Image.Image) -> Iterator[Race]:\n rp = mathtools.ResizeProxy(img.width)\n spec_bbox = rp.vector4((221, 12, 478, 32), 492)\n no1_fan_count_bbox = rp.vector4((207, 54, 360, 72), 492)\n grade_color_pos = rp.vector2((182, 14), 492)\n\n stadium, ground, distance, turn, track = _recognize_spec(img.crop(spec_bbox))\n no1_fan_count = _recognize_fan_count(img.crop(no1_fan_count_bbox))\n grades = _recognize_grade(tuple(cast.list_(img.getpixel(grade_color_pos), int)))\n full_spec = (\n ctx.date,\n stadium,\n ground,\n distance,\n turn,\n track,\n no1_fan_count,\n grades,\n )\n match_count = 0\n for i in _find_by_spec(*full_spec):\n LOGGER.info(\"image match: %s\", i)\n yield i\n match_count += 1\n if not match_count:\n raise ValueError(\"_find_by_race_menu_item: no race match spec: %s\", full_spec)\n\n\ndef find_by_race_menu_image(\n ctx: Context, screenshot: PIL.Image.Image\n) -> Iterator[Tuple[Race, Tuple[int, int]]]:\n rp = 
mathtools.ResizeProxy(screenshot.width)\n for _, pos in template.match(screenshot, templates.SINGLE_MODE_RACE_MENU_FAN_ICON):\n _, y = pos\n bbox = (\n rp.vector(23, 540),\n y - rp.vector(51, 540),\n rp.vector(515, 540),\n y + rp.vector(46, 540),\n )\n for i in _find_by_race_menu_item(ctx, screenshot.crop(bbox)):\n yield i, pos\n"
] | [
[
"numpy.percentile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bladezzw/DeepQuantInChina | [
"ce74a9bf8db91e3545ccc3e7af81f80796a536fa"
] | [
"Model/BPnet.py"
] | [
"\nimport csvdata\n\n# some import works\nimport pandas as pd\nimport numpy as np\n\nimport torch\n\n\n\nfrom feature.features import features\n\n\n# Generate dummy data\ndatapath = csvdata.csvdatapaths[0] # 输入需要训练的文件\ndata = pd.read_csv(datapath) # 载入csv文件\ndate = np.array(data['date'])\nTime = np.array(data['time'])\nopen = np.array(data['open'])\nhigh = np.array(data['high'])\nlow = np.array(data['low'])\nclose = np.array(data['close'])\nvolume = np.array(data['volume'])\n\nFs = features(open, high, low, close, volume) # 这里有狠多特征,取需要的用\n\n\nx = Fs.close.astype('float32')\nlen_x = len(x)\ny = (np.sign(Fs.close[1:] - Fs.close[:-1])).astype(\"int\")\ny = np.insert(y, 0, 0) # 在0位插入一个nan,使其长度与x相等\n\n\n# from sklearn.preprocessing import LabelBinarizer\n# lb = LabelBinarizer()\n# lb.fit([1, 0, -1])\n# y = lb.transform(y_) # 转换成3列\n\n\n\nx_train_endpoint = int(len_x * 0.7) # 向下取整\nx_val_endpoint_ = int(len_x * 0.15) # 向下取整\n\n\nx_train = x[:x_train_endpoint]\ny_train = y[:x_train_endpoint]\n# x_val = x[x_train_endpoint: x_val_endpoint_]\n# y_val = y[x_train_endpoint: x_val_endpoint_]\nx_test = x[x_train_endpoint:]\ny_test = y[x_train_endpoint:]\n\n\n\n#\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\n\nclass MyDataset(Dataset):\n def __init__(self, x, y, window=5, transform=None, target_transform = None):\n\n xs = []\n for i in range(len(x)-window):\n x_ = x[i:i+window]\n label_ = y[i+window-1]\n xs.append((x_, label_))\n\n self.xs = xs # 最主要就是要生成这个list, 然后DataLoader中给index,通过getitem读取图片数据\n self.transform = transform\n self.target_transform = target_transform\n\n def __getitem__(self, index):\n x, label = self.xs[index]\n if self.transform is not None:\n print(\"self.transform is not None:\", self.transform is not None) # 在这里做transform,转为tensor等等\n\n return x, label\n\n def __len__(self):\n return len(self.xs)\n\ntrain_data = MyDataset(x=x_train, y=y_train, window=100)\ntest_data = MyDataset(x=x_test, y=y_test, window=100)\n\ntrain_bs = 100\ntest_bs = 100\ntrain_loader = DataLoader(dataset=train_data, batch_size=train_bs)\ntest_loader = DataLoader(dataset=test_data, batch_size=test_bs)\n\n\n\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nclass Net(nn.Module):\n def __init__(self):\n super(Net, self).__init__()\n self.fc1 = nn.Linear(100, 120)\n self.fc2 = nn.Linear(120, 84)\n self.fc3 = nn.Linear(84, 3)\n\n def forward(self, x):\n x = F.relu(self.fc1(x))\n x = F.relu(self.fc2(x))\n x = self.fc3(x)\n return x\n\nnet = Net()\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\nnet.to(device)\ncriterion = nn.SoftMarginLoss()\n# optimizer = optim.Adam(model.parameters(), lr=0.001)\nlr_init = 0.001\noptimizer = optim.SGD(net.parameters(), lr=lr_init, momentum=0.9, dampening=0.1) # 选择优化器\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=50, gamma=0.1) # 设置学习率下降策略\n\n\nfor epoch in range(10): # loop over the dataset multiple times\n\n running_loss = 0.0\n for i, data in enumerate(train_loader, 0):\n # get the inputs\n inputs, labels = data\n inputs, labels = inputs.to(device), labels.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward + backward + optimize\n outputs = net(inputs)\n loss = criterion(outputs, labels)\n loss.backward()\n optimizer.step()\n\n # print statistics\n running_loss += loss.item()\n if i % 2000 == 1999: # print every 2000 mini-batches\n print('[%d, %5d] loss: %.3f' %\n (epoch + 1, i + 1, running_loss / 2000))\n running_loss = 
0.0\n\n\nprint('Finished Training')\n"
] | [
[
"pandas.read_csv",
"torch.utils.data.DataLoader",
"numpy.sign",
"torch.nn.Linear",
"numpy.insert",
"torch.nn.SoftMarginLoss",
"torch.cuda.is_available",
"numpy.array",
"torch.optim.lr_scheduler.StepLR"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
LangDaniel/hpv_status | [
"a313631397ba6d6bbb47d0e43197d01f4da6f594"
] | [
"utils/convert.py"
] | [
"import sys\nimport os\nimport caffe.proto.caffe_pb2 as pb2\nimport numpy as np\nimport h5py\n\n# mainly taken from:\n# https://github.com/chuckcho/c3d-keras/blob/master/convert_caffe_model.py\n\nfile_in = sys.argv[1] \nfile_out = os.path.abspath(os.path.join(file_in, os.pardir))\nfile_out = os.path.join(file_out, 'C3D_weights.h5')\n\nprint('converting {}'.format(file_in))\nprint('to {}'.format(file_out))\n\np = pb2.NetParameter()\np.ParseFromString(\n open(file_in, 'rb').read()\n)\n\n# use this function to convert the first fully connected layer\ndef conv_first_fc(w):\n # kernel: (8192, 4096): (512x1x4x4, 4096) -> (1x4x4x512, 4096)\n wo = np.zeros_like(w)\n for i in range(w.shape[1]):\n wi = np.squeeze(w[:,i])\n wo[:,i] = np.transpose(np.reshape(wi, (512,4,4)), (1, 2, 0)).flatten()\n return wo\n\nwith h5py.File(file_out, 'w') as ff: \n for layer in p.layers:\n name = layer.name\n\n if 'conv' not in name and 'fc' not in name:\n continue\n\n print('converting layer:')\n print(name)\n\n # somehow blobs[0].length is deprecated therefore the height\n # has to be computed separate for the fc layers\n # in the conv layers the kernel size is always [3, 3, 3] \n num = layer.blobs[0].num\n ch = layer.blobs[0].channels\n width = layer.blobs[0].width\n\n bias = np.array(layer.blobs[1].diff, dtype=np.float32)\n kernel = np.array(layer.blobs[0].diff, dtype=np.float32)\n\n if 'conv' in name:\n shape = [num, ch, 3, 3, 3]\n kernel = kernel.reshape(shape)\n kernel = np.transpose(kernel, (2, 3, 4, 1, 0))\n else:\n height = kernel.shape[0] // width\n shape = [width, height]\n kernel = kernel.reshape(shape).T\n if 'fc6' in name:\n kernel = conv_first_fc(kernel)\n\n layer_grp = ff.create_group(name)\n layer_grp.create_dataset('bias', data=bias) \n layer_grp.create_dataset('kernel', data=kernel) \n"
] | [
[
"numpy.reshape",
"numpy.squeeze",
"numpy.zeros_like",
"numpy.transpose",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mdorier/Benchmarks | [
"793181dd0e793a11d1094101cc6321206c2b06a7"
] | [
"Pilot3/P3B1/p3b1_baseline_keras2.py"
] | [
"from __future__ import print_function\n\nimport numpy as np\n\nfrom keras import backend as K\n\nfrom keras.layers import Input, Dense, Dropout, Activation\nfrom keras.optimizers import SGD, Adam, RMSprop\nfrom keras.models import Model\nfrom keras.callbacks import ModelCheckpoint, CSVLogger, ReduceLROnPlateau\n\nfrom sklearn.metrics import f1_score\n\nimport p3b1 as bmk\nimport candle\n\ndef initialize_parameters():\n\n # Build benchmark object\n p3b1Bmk = bmk.BenchmarkP3B1(bmk.file_path, 'p3b1_default_model.txt', 'keras',\n prog='p3b1_baseline', desc='Multi-task (DNN) for data extraction from clinical reports - Pilot 3 Benchmark 1')\n \n # Initialize parameters\n gParameters = candle.initialize_parameters(p3b1Bmk)\n #bmk.logger.info('Params: {}'.format(gParameters))\n\n return gParameters\n\n\ndef fetch_data(gParameters):\n \"\"\" Downloads and decompresses the data if not locally available.\n Since the training data depends on the model definition it is not loaded,\n instead the local path where the raw data resides is returned\n \"\"\"\n\n path = gParameters['data_url']\n fpath = candle.fetch_file(path + gParameters['train_data'], 'Pilot3', untar=True)\n \n return fpath\n\n\ndef build_model(gParameters, kerasDefaults,\n shared_nnet_spec, individual_nnet_spec,\n input_dim, Y_train, Y_test,\n verbose=False):\n \n labels_train = []\n labels_test = []\n\n n_out_nodes = []\n\n for l in range( len( Y_train ) ):\n truth_train = np.array( Y_train[l], dtype='int32' )\n truth_test = np.array( Y_test[l], dtype='int32' )\n \n mv = int( np.max( truth_train ) )\n \n label_train = np.zeros( ( len( truth_train ), mv + 1 ) )\n for i in range( len( truth_train ) ):\n label_train[ i, truth_train[ i ] ] = 1\n \n label_test = np.zeros( ( len( truth_test ), mv + 1 ) )\n for i in range( len(truth_test) ):\n label_test[ i, truth_test[ i ] ] = 1\n\n labels_train.append( label_train )\n labels_test.append( label_test )\n\n n_out_nodes.append( mv + 1 )\n\n\n shared_layers = []\n\n\n # input layer\n layer = Input( shape = ( input_dim, ), name= 'input' )\n shared_layers.append( layer )\n\n\n # shared layers\n for k in range( len( shared_nnet_spec ) ):\n layer = Dense( shared_nnet_spec[ k ], activation=gParameters['activation'],\n name= 'shared_layer_' + str( k ) )( shared_layers[ -1 ] )\n if gParameters['drop'] > 0:\n layer = Dropout( gParameters['drop'] )( shared_layers[ -1 ] )\n shared_layers.append( layer )\n\n\n # individual layers\n indiv_layers_arr = []\n models = []\n\n trainable_count = 0\n non_trainable_count = 0\n\n for l in range( len( individual_nnet_spec ) ):\n indiv_layers = [ shared_layers[-1] ]\n for k in range( len( individual_nnet_spec[l] ) + 1 ):\n if k < len( individual_nnet_spec[l] ):\n layer = Dense( individual_nnet_spec[l][k], activation=gParameters['activation'],\n name= 'indiv_layer_' + str( l ) + '_' + str( k ) )( indiv_layers[-1] )\n indiv_layers.append( layer )\n if gParameters['drop'] > 0:\n layer = Dropout( gParameters['drop'] )( indiv_layers[-1] )\n indiv_layers.append( layer )\n else:\n layer = Dense( n_out_nodes[l], activation=gParameters['out_activation'],\n name= 'out_' + str( l ) )( indiv_layers[-1] )\n indiv_layers.append( layer )\n\n indiv_layers_arr.append( indiv_layers )\n\n model = Model( inputs=[shared_layers[0]], outputs=[indiv_layers[-1]] )\n\n # calculate trainable/non-trainable param count for each model\n param_counts = candle.compute_trainable_params(model)\n trainable_count += param_counts['trainable_params']\n non_trainable_count += 
param_counts['non_trainable_params']\n\n models.append( model )\n\n # capture total param counts\n gParameters['trainable_params'] = trainable_count\n gParameters['non_trainable_params'] = non_trainable_count\n gParameters['total_params'] = trainable_count + non_trainable_count\n\n # Define optimizer\n optimizer = candle.build_optimizer(gParameters['optimizer'],\n gParameters['learning_rate'],\n kerasDefaults)\n\n # DEBUG - verify\n if verbose:\n for k in range( len( models ) ):\n model = models[k]\n print('Model: ', k)\n model.summary()\n\n for k in range( len( models ) ):\n model = models[ k ]\n model.compile( loss=gParameters['loss'], optimizer=optimizer, metrics=[gParameters['metrics']] )\n\n return models, labels_train, labels_test\n\n\ndef train_model(gParameters, models,\n X_train, Y_train,\n X_test, Y_test,\n fold, verbose=False):\n\n base_run_id = gParameters['run_id']\n\n for epoch in range( gParameters['epochs'] ):\n for k in range( len( models ) ):\n\n model = models[ k ]\n\n gParameters['run_id'] = base_run_id + \".{}.{}.{}\".format(fold, epoch, k)\n candleRemoteMonitor = candle.CandleRemoteMonitor(params=gParameters)\n timeoutMonitor = candle.TerminateOnTimeOut(gParameters['timeout'])\n\n model.fit( { 'input': X_train[k] }, { 'out_' + str( k ) : Y_train[k] }, epochs=1, verbose=verbose,\n callbacks= [ candleRemoteMonitor, timeoutMonitor ],\n batch_size= gParameters['batch_size'], validation_data= ( X_test[k], Y_test[k] ) )\n\n\n return models\n\n\ndef evaluate_model(X_test, truths_test, labels_test, models):\n\n # retrieve truth-pred pair\n avg_loss = 0.0\n ret = []\n\n for k in range( len( models ) ):\n ret_k = []\n\n feature_test = X_test[ k ]\n truth_test = truths_test[ k ]\n label_test = labels_test[ k ]\n model = models[ k ]\n\n loss = model.evaluate( feature_test, label_test )\n avg_loss = avg_loss + loss[0]\n print(\"In EVALUATE loss: \", loss)\n\n pred = model.predict( feature_test )\n\n ret_k.append( truth_test )\n ret_k.append( np.argmax( pred, axis= 1 ) )\n\n ret.append( ret_k )\n\n avg_loss = avg_loss / float( len( models ) )\n ret.append( avg_loss )\n\n return ret\n\n\ndef run(gParameters):\n\n fpath = fetch_data(gParameters)\n # Get default parameters for initialization and optimizer functions\n kerasDefaults = candle.keras_default_config()\n\n # Construct structures common to all folds\n# shared_nnet_spec = []\n# elem = gParameters['shared_nnet_spec'].split( ',' )\n# for el in elem:\n# shared_nnet_spec.append( int( el ) )\n\n# individual_nnet_spec = []\n# indiv = gParameters['ind_nnet_spec'].split( ':' )\n# for ind in indiv:\n# indiv_nnet_spec = []\n# elem = ind.split( ',' )\n# for el in elem:\n# indiv_nnet_spec.append( int( el ) )\n# individual_nnet_spec.append( indiv_nnet_spec )\n\n shared_nnet_spec = gParameters['shared_nnet_spec']\n individual_nnet_spec = gParameters['ind_nnet_spec']\n\n # Construct features common to all folds\n features = []\n feat = gParameters['feature_names'].split(':')\n for f in feat:\n features.append(f)\n\n n_feat = len(feat)\n print('Feature names:')\n for i in range(n_feat):\n print(features[i])\n\n\n # initialize arrays for all the features\n truth_array = [[] for _ in range(n_feat)]\n pred_array = [[] for _ in range(n_feat)]\n avg_loss = 0.0\n\n\n # stdout display level\n verbose = True\n\n # per fold\n for fold in range( gParameters['n_fold'] ):\n\n # build data\n X_train, Y_train, X_test, Y_test = bmk.build_data( len(individual_nnet_spec), fold, fpath )\n\n # build model\n input_dim = len( X_train[0][0] )\n models, 
labels_train, labels_test = build_model(gParameters, kerasDefaults,\n shared_nnet_spec, individual_nnet_spec,\n input_dim, Y_train, Y_test, verbose)\n\n # train model\n models = train_model(gParameters, models,\n X_train, labels_train,\n X_test, labels_test,\n fold, verbose)\n\n # evaluate model\n ret = evaluate_model(X_test, Y_test, labels_test, models)\n\n for i in range(n_feat):\n truth_array[i].extend(ret[i][0])\n pred_array[i].extend(ret[i][1])\n\n avg_loss += ret[ -1 ]\n\n\n avg_loss /= float( gParameters['n_fold'] )\n\n for task in range(n_feat):\n print('Task',task+1,':',features[task],'- Macro F1 score', f1_score(truth_array[task], pred_array[task], average='macro'))\n print('Task',task+1,':',features[task],'- Micro F1 score', f1_score(truth_array[task], pred_array[task], average='micro'))\n\n\n return avg_loss\n\n\ndef main():\n\n gParameters = initialize_parameters()\n avg_loss = run(gParameters)\n print( \"Average loss: \", avg_loss )\n\n\n\nif __name__ == '__main__':\n main()\n try:\n K.clear_session()\n except AttributeError: # theano does not have this function\n pass\n"
] | [
[
"numpy.max",
"sklearn.metrics.f1_score",
"numpy.array",
"numpy.argmax"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
CySecWee/ML | [
"972f21a70396d2968856799aec1be8b7409bed73"
] | [
"ML2/logical_regression/lable_encoder2.py"
] | [
"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import LabelEncoder\n\n# creating initial dataframe\nbridge_types = ('Arch','Beam','Truss','Cantilever','Tied Arch','Suspension','Cable')\nbridge_df = pd.DataFrame(bridge_types, columns=['Bridge_Types'])\n# creating instance of labelencoder\nlabelencoder = LabelEncoder()\n# Assigning numerical values and storing in another column\nbridge_df['Bridge_Types_Cat'] = labelencoder.fit_transform(bridge_df['Bridge_Types'])\nprint(bridge_df)\n"
] | [
[
"sklearn.preprocessing.LabelEncoder",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
qq874938383/CIIC | [
"837f7363c09e71fe1548d8fa94bd5a2fa568074a"
] | [
"maskrcnn_benchmark/data/datasets/cityscapes.py"
] | [
"import os\nimport glob\nimport json\nfrom PIL import Image\n\n\nimport numpy as np\nimport torch\nimport torchvision\n\n\nfrom maskrcnn_benchmark.structures.bounding_box import BoxList\nfrom maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask\nfrom .abstract import AbstractDataset\n\n\n\nclass CityScapesDataset(AbstractDataset):\n def __init__(\n self,\n img_dir,\n ann_dir,\n split,\n mode=\"mask\",\n transforms=None,\n min_area=0,\n mini=None,\n ):\n \"\"\"\n Arguments:\n img_dir: /path/to/leftImg8bit/ has to contain {train,val,test}\n ann_dir: /path/to/gtFine/ has to contain {train,val,test}\n split: \"train\" or \"val\" or \"test\"\n mode: \"poly\" or \"mask\", which annotation format to use\n transforms: apply transformations to input/annotation\n min_area: exclude intances below a specific area (bbox area)\n mini: limit the size of the dataset, so len(dataset) == mini for\n debugging purposes\n \"\"\"\n assert split in [\"train\", \"val\", \"test\"]\n\n img_dir = os.path.abspath(os.path.join(img_dir, split))\n ann_dir = os.path.abspath(os.path.join(ann_dir, split))\n\n assert os.path.exists(img_dir), img_dir\n assert os.path.exists(ann_dir), ann_dr\n\n self.ann_dir = ann_dir\n\n self.split = split\n self.CLASSES = [\"__background__\"]\n self.CLASSES += [l.name for l in csHelpers.labels if l.hasInstances]\n\n # Adds name_to_id and id_to_name mapping\n self.initMaps()\n\n # This is required for parsing binary masks\n self.cityscapesID_to_ind = {\n l.id: self.name_to_id[l.name] for l in csHelpers.labels if l.hasInstances\n }\n\n self.transforms = transforms\n self.min_area = int(min_area)\n\n img_pattern = os.path.join(img_dir, \"*\", \"*_leftImg8bit.png\")\n img_paths = sorted(glob.glob(img_pattern))\n\n if mode == \"mask\":\n ann_pattern = os.path.join(ann_dir, \"*\", \"*_instanceIds.png\")\n elif mode == \"poly\":\n ann_pattern = os.path.join(ann_dir, \"*\", \"*_polygons.json\")\n else:\n raise NotImplementedError(\"Mode is not implemented yet: %s\" % mode)\n\n self.mode = mode\n ann_paths = sorted(glob.glob(ann_pattern))\n\n if mini is not None:\n # Keep the mini dataset diverse by setting the stride\n img_paths = img_paths[:: len(img_paths) // mini + 1]\n ann_paths = ann_paths[:: len(ann_paths) // mini + 1]\n\n assert len(img_paths) == len(ann_paths)\n\n self.img_paths = img_paths\n self.ann_paths = ann_paths\n\n def __getitem__(self, idx):\n img_path = self.img_paths[idx]\n ann_path = self.ann_paths[idx]\n\n if self.mode == \"mask\":\n ann = torch.from_numpy(np.asarray(Image.open(ann_path)))\n # masks are represented with tensors\n boxes, segmentations, labels = self._processBinayMasks(ann)\n else:\n with open(ann_path, \"r\") as ann_file:\n ann = json.load(ann_file)\n # masks are represented with polygons\n boxes, segmentations, labels = self._processPolygons(ann)\n\n boxes, segmentations, labels = self._filterGT(boxes, segmentations, labels)\n\n if len(segmentations) == 0:\n empty_ann_path = self.get_img_info(idx)[\"ann_path\"]\n print(\"EMPTY ENTRY:\", empty_ann_path)\n # self.img_paths.pop(idx)\n # self.ann_paths.pop(idx)\n img, target, _ = self[(idx + 1) % len(self)]\n\n # just override this image with the next\n return img, target, idx\n\n img = Image.open(img_path)\n # Compose all into a BoxList instance\n target = BoxList(boxes, img.size, mode=\"xyxy\")\n target.add_field(\"labels\", torch.tensor(labels))\n masks = SegmentationMask(segmentations, img.size, mode=self.mode)\n target.add_field(\"masks\", masks)\n if self.transforms is not None:\n img, 
target = self.transforms(img, target)\n\n return img, target, idx\n\n def _filterGT(self, boxes, segmentations, labels):\n filtered_boxes = []\n filtered_segmentations = []\n filtered_labels = []\n assert len(segmentations) == len(labels) == len(boxes)\n\n for box, segmentation, label in zip(boxes, segmentations, labels):\n xmin, ymin, xmax, ymax = box\n area = (xmax - xmin) * (ymax - ymin)\n if area < self.min_area:\n continue\n\n filtered_boxes.append(box)\n filtered_segmentations.append(segmentation)\n filtered_labels.append(label)\n\n if len(filtered_boxes) < 1:\n filtered_boxes = torch.empty(0, 4)\n\n return filtered_boxes, filtered_segmentations, filtered_labels\n\n def _processPolygons(self, ann):\n # For a single object polygon annotations are stored in CityScapes like\n # [[x1, y1], [x2, y2]...] and we need them in the following format:\n # [x1, y1, x2, y2, x3, y3 ...]\n polys = []\n labels = []\n boxes = []\n\n def poly_to_tight_box(poly):\n xmin = int(min(poly[::2]))\n ymin = int(min(poly[1::2]))\n xmax = int(max(poly[::2]))\n ymax = int(max(poly[1::2]))\n bbox = xmin, ymin, xmax, ymax\n return bbox\n\n for inst in ann[\"objects\"]:\n label = inst[\"label\"]\n if label not in self.CLASSES:\n continue\n\n label = self.name_to_id[label]\n\n cityscapes_poly = inst[\"polygon\"]\n poly = []\n for xy in cityscapes_poly:\n # Equivalent with `poly += xy` but this is more verbose\n x = xy[0]\n y = xy[1]\n poly.append(x)\n poly.append(y)\n\n # In CityScapes instances are described with single polygons only\n box = poly_to_tight_box(poly)\n\n boxes.append(box)\n polys.append([poly])\n labels.append(label)\n\n if len(boxes) < 1:\n boxes = torch.empty(0, 4)\n\n return boxes, polys, labels\n\n def _processBinayMasks(self, ann):\n boxes = []\n masks = []\n labels = []\n\n def mask_to_tight_box(mask):\n a = mask.nonzero()\n bbox = [\n torch.min(a[:, 1]),\n torch.min(a[:, 0]),\n torch.max(a[:, 1]),\n torch.max(a[:, 0]),\n ]\n bbox = list(map(int, bbox))\n return bbox # xmin, ymin, xmax, ymax\n\n # Sort for consistent order between instances as the polygon annotation\n instIds = torch.sort(torch.unique(ann))[0]\n for instId in instIds:\n if instId < 1000: # group labels\n continue\n\n mask = ann == instId\n label = int(instId / 1000)\n label = self.cityscapesID_to_ind[label]\n box = mask_to_tight_box(mask)\n\n boxes.append(box)\n masks.append(mask)\n labels.append(label)\n\n return boxes, masks, labels\n\n def __len__(self):\n return len(self.img_paths)\n\n def get_img_info(self, index):\n # Reverse engineered from voc.py\n # All the images have the same size\n return {\n \"height\": 1024,\n \"width\": 2048,\n \"idx\": index,\n \"img_path\": self.img_paths[index],\n \"ann_path\": self.ann_paths[index],\n }\n"
] | [
[
"torch.max",
"torch.empty",
"torch.min",
"torch.tensor",
"torch.unique"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
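The CityScapesDataset record above turns gtFine `*_instanceIds.png` maps into boxes and per-instance masks: pixel ids below 1000 are background or group labels, ids of 1000 and above encode `class_id * 1000 + instance_index`, and each binary mask is shrunk to a tight xyxy box. A minimal sketch of that decoding step, using only the torch calls listed for this record (the toy id map and the two instances of class id 26 are invented for illustration):

```python
import torch

# Toy instance-id map: 0 = background, 26001 / 26002 = two instances of class id 26.
ann = torch.zeros(6, 8, dtype=torch.long)
ann[1:3, 1:4] = 26001
ann[3:5, 4:7] = 26002

def mask_to_tight_box(mask):
    a = mask.nonzero()          # (N, 2) indices; column 0 is y, column 1 is x
    return (int(torch.min(a[:, 1])), int(torch.min(a[:, 0])),
            int(torch.max(a[:, 1])), int(torch.max(a[:, 0])))   # xmin, ymin, xmax, ymax

boxes, labels = [], []
for inst_id in torch.sort(torch.unique(ann))[0]:
    if inst_id < 1000:          # skip background / group labels, as in the record
        continue
    mask = ann == inst_id
    labels.append(int(inst_id // 1000))
    boxes.append(mask_to_tight_box(mask))

print(labels)   # [26, 26]
print(boxes)    # [(1, 1, 3, 2), (4, 3, 6, 4)]
```

The real dataset then wraps these boxes and masks in BoxList / SegmentationMask from maskrcnn_benchmark, which this sketch leaves out.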
NYU-DICE-Lab/open_clip | [
"fd71804b503135fb1c7cc8de3a0d6599741c8ed9"
] | [
"src/training/main.py"
] | [
"import logging\nimport os\nimport random\nfrom datetime import datetime\n\nimport numpy as np\nimport torch\nimport torch.backends.cudnn as cudnn\nfrom torch import optim\nfrom torch.cuda.amp import GradScaler\n\ntry:\n import wandb\nexcept ImportError:\n wandb = None\n\ntry:\n import torch.utils.tensorboard as tensorboard\nexcept ImportError:\n tensorboard = None\n\ntry:\n import horovod.torch as hvd\nexcept ImportError:\n hvd = None\n\nfrom open_clip import create_model_and_transforms, trace_model\nfrom training.data import get_data\nfrom training.distributed import is_master, init_distributed_device, world_info_from_env\nfrom training.logger import setup_logging\nfrom training.params import parse_args\nfrom training.scheduler import cosine_lr\nfrom training.train import train_one_epoch, evaluate\n\n\ndef random_seed(seed=42, rank=0):\n torch.manual_seed(seed + rank)\n np.random.seed(seed + rank)\n random.seed(seed + rank)\n\n\ndef main():\n args = parse_args()\n\n # sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?\n args.model = args.model.replace('/', '-')\n\n # get the name of the experiments\n if args.name is None:\n args.name = '-'.join([\n datetime.now().strftime(\"%Y_%m_%d-%H_%M_%S\"),\n f\"model_{args.model}\",\n f\"lr_{args.lr}\",\n f\"b_{args.batch_size}\",\n f\"j_{args.workers}\",\n f\"p_{args.precision}\",\n ])\n\n # discover initial world args early so we can log properly\n args.distributed = False\n args.local_rank, args.rank, args.world_size = world_info_from_env()\n\n args.log_path = None\n if is_master(args, local=args.log_local):\n log_base_path = os.path.join(args.logs, args.name)\n os.makedirs(log_base_path, exist_ok=True)\n log_filename = f'out-{args.rank}' if args.log_local else 'out.log'\n args.log_path = os.path.join(log_base_path, log_filename)\n if os.path.exists(args.log_path):\n print(\n \"Error. Experiment already exists. Use --name {} to specify a new experiment.\"\n )\n return -1\n\n # Set logger\n args.log_level = logging.DEBUG if args.debug else logging.INFO\n setup_logging(args.log_path, args.log_level)\n\n # fully initialize distributed device environment\n device = init_distributed_device(args)\n\n args.wandb = 'wandb' in args.report_to or 'all' in args.report_to\n args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to\n if is_master(args):\n args.tensorboard_path = os.path.join(args.logs, args.name, \"tensorboard\") if args.tensorboard else ''\n args.checkpoint_path = os.path.join(args.logs, args.name, \"checkpoints\")\n for dirname in [args.tensorboard_path, args.checkpoint_path]:\n if dirname:\n os.makedirs(dirname, exist_ok=True)\n else:\n args.tensorboard_path = ''\n args.checkpoint_path = ''\n\n if args.copy_codebase:\n copy_codebase(args)\n\n assert args.precision in ['amp', 'fp16', 'fp32']\n if args.precision == 'fp16':\n logging.warning(\n 'It is recommended to use AMP mixed-precision instead of FP16. '\n 'FP16 support needs further verification and tuning, especially for train.')\n\n if args.horovod:\n logging.info(\n f'Running in horovod mode with multiple processes / nodes. Device: {args.device}.'\n f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')\n elif args.distributed:\n logging.info(\n f'Running in distributed mode with multiple processes. Device: {args.device}.'\n f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')\n else:\n logging.info(f'Running with a single process. 
Device {args.device}.')\n\n model, preprocess_train, preprocess_val = create_model_and_transforms(\n args.model,\n args.pretrained,\n precision=args.precision,\n device=device,\n jit=args.torchscript,\n force_quick_gelu=args.force_quick_gelu,\n pretrained_image=args.pretrained_image,\n )\n\n if args.trace:\n model = trace_model(model, batch_size=args.batch_size, device=device)\n\n if args.lock_image:\n # lock image tower as per LiT - https://arxiv.org/abs/2111.07991\n model.lock_image_tower(\n unlocked_groups=args.lock_image_unlocked_groups,\n freeze_bn_stats=args.lock_image_freeze_bn_stats)\n\n if is_master(args):\n logging.info(\"Model:\")\n logging.info(f\"{str(model)}\")\n logging.info(\"Params:\")\n params_file = os.path.join(args.logs, args.name, \"params.txt\")\n with open(params_file, \"w\") as f:\n for name in sorted(vars(args)):\n val = getattr(args, name)\n logging.info(f\" {name}: {val}\")\n f.write(f\"{name}: {val}\\n\")\n\n if args.distributed and not args.horovod:\n if args.use_bn_sync:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n ddp_args = {}\n if args.ddp_static_graph:\n # this doesn't exist in older PyTorch, arg only added if enabled\n ddp_args['static_graph'] = True\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], **ddp_args)\n\n data = get_data(args, (preprocess_train, preprocess_val))\n assert len(data), 'At least one train or eval dataset must be specified.'\n if args.trace:\n assert 'train' not in data, 'Cannot train with traced model'\n\n exclude = lambda n, p: p.ndim < 2 or \"bn\" in n or \"ln\" in n or \"bias\" in n or 'logit_scale' in n\n include = lambda n, p: not exclude(n, p)\n\n named_parameters = list(model.named_parameters())\n gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad]\n rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad]\n\n if args.train_data is None:\n optimizer = None\n scheduler = None\n else:\n optimizer = optim.AdamW(\n [\n {\"params\": gain_or_bias_params, \"weight_decay\": 0.},\n {\"params\": rest_params, \"weight_decay\": args.wd},\n ],\n lr=args.lr,\n betas=(args.beta1, args.beta2),\n eps=args.eps,\n )\n total_steps = data[\"train\"].dataloader.num_batches * args.epochs\n scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps)\n\n if args.horovod:\n optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())\n hvd.broadcast_parameters(model.state_dict(), root_rank=0)\n hvd.broadcast_optimizer_state(optimizer, root_rank=0)\n\n scaler = GradScaler() if args.precision == \"amp\" else None\n # optionally resume from a checkpoint\n start_epoch = 0\n if args.resume is not None:\n if os.path.isfile(args.resume):\n checkpoint = torch.load(args.resume, map_location=device)\n if 'epoch' in checkpoint:\n # resuming a train checkpoint w/ epoch and optimizer state\n start_epoch = checkpoint[\"epoch\"]\n sd = checkpoint[\"state_dict\"]\n if not args.distributed and next(iter(sd.items()))[0].startswith('module'):\n sd = {k[len('module.'):]: v for k, v in sd.items()}\n model.load_state_dict(sd)\n if optimizer is not None:\n optimizer.load_state_dict(checkpoint[\"optimizer\"])\n if scaler is not None and 'scaler' in checkpoint:\n scaler.load_state_dict(checkpoint['scaler'])\n logging.info(f\"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})\")\n else:\n # loading a bare (model only) checkpoint for fine-tune or evaluation\n model.load_state_dict(checkpoint)\n logging.info(f\"=> 
loaded checkpoint '{args.resume}' (epoch {start_epoch})\")\n else:\n logging.info(\"=> no checkpoint found at '{}'\".format(args.resume))\n\n cudnn.benchmark = True\n cudnn.deterministic = False\n\n # determine if this worker should save logs and checkpoints. only do so if it is rank == 0\n args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args)\n writer = None\n if args.save_logs and args.tensorboard:\n assert tensorboard is not None, \"Please install tensorboard.\"\n writer = tensorboard.SummaryWriter(args.tensorboard_path)\n\n if args.wandb and is_master(args):\n assert wandb is not None, 'Please install wandb.'\n logging.debug('Starting wandb.')\n args.train_sz = data[\"train\"].dataloader.num_samples\n if args.val_data is not None:\n args.val_sz = data[\"val\"].dataloader.num_samples\n # you will have to configure this for your project!\n wandb.init(\n project=\"open-clip\",\n notes=args.wandb_notes,\n tags=[],\n config=vars(args),\n )\n if args.debug:\n wandb.watch(model, log='all')\n wandb.save(params_file)\n logging.debug('Finished loading wandb.')\n\n if 'train' not in data:\n evaluate(model, data, start_epoch, args, writer)\n return\n elif start_epoch == 0 and 'val' in data:\n evaluate(model, data, 0, args, writer)\n\n for epoch in range(start_epoch, args.epochs):\n if is_master(args):\n logging.info(f'Start epoch {epoch}')\n\n train_one_epoch(model, data, epoch, optimizer, scaler, scheduler, args, writer)\n completed_epoch = epoch + 1\n\n if any(v in data for v in ('val', 'imagenet-val', 'imagenet-v2')):\n evaluate(model, data, completed_epoch, args, writer)\n\n # Saving checkpoints.\n if args.save_logs:\n checkpoint_dict = {\n \"epoch\": completed_epoch,\n \"name\": args.name,\n \"state_dict\": model.state_dict(),\n \"optimizer\": optimizer.state_dict(),\n }\n if scaler is not None:\n checkpoint_dict[\"scaler\"] = scaler.state_dict()\n\n if completed_epoch == args.epochs or (\n args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0\n ):\n torch.save(\n checkpoint_dict,\n os.path.join(args.checkpoint_path, f\"epoch_{completed_epoch}.pt\"),\n )\n if args.save_most_recent:\n torch.save(\n checkpoint_dict,\n os.path.join(args.checkpoint_path, f\"epoch_latest.pt\"),\n )\n\n if args.wandb and is_master(args):\n wandb.finish()\n\n\ndef copy_codebase(args):\n from shutil import copytree, ignore_patterns\n new_code_path = os.path.join(args.logs, args.name, \"code\")\n if os.path.exists(new_code_path):\n print(\n f\"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment.\"\n )\n return -1\n print(f\"Copying codebase to {new_code_path}\")\n current_code_path = os.path.realpath(__file__)\n for _ in range(3):\n current_code_path = os.path.dirname(current_code_path)\n copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb'))\n print(\"Done copying code.\")\n return 1\n\n\nif __name__ == \"__main__\":\n main()\n"
] | [
[
"numpy.random.seed",
"torch.load",
"torch.manual_seed",
"torch.nn.SyncBatchNorm.convert_sync_batchnorm",
"torch.cuda.amp.GradScaler",
"torch.optim.AdamW",
"torch.utils.tensorboard.SummaryWriter",
"torch.nn.parallel.DistributedDataParallel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
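The training script above splits parameters into two AdamW groups so that biases, normalization gains and the logit scale receive no weight decay. A self-contained sketch of that split, assuming a toy stand-in model and placeholder hyperparameters (the real script builds the model with create_model_and_transforms and reads lr / wd / betas / eps from its CLI arguments):

```python
import torch
from torch import nn, optim

model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 4))  # stand-in model

# Same rule as in the record: 1-D tensors and *bn*/*ln*/*bias*/logit_scale names get no decay.
exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or "logit_scale" in n
named = list(model.named_parameters())
gain_or_bias = [p for n, p in named if exclude(n, p) and p.requires_grad]
rest = [p for n, p in named if not exclude(n, p) and p.requires_grad]

optimizer = optim.AdamW(
    [{"params": gain_or_bias, "weight_decay": 0.0},
     {"params": rest, "weight_decay": 0.2}],      # 0.2 stands in for args.wd
    lr=5e-4, betas=(0.9, 0.98), eps=1e-6,         # placeholders for args.lr, beta1/beta2, eps
)
print(len(gain_or_bias), len(rest))               # 4 undecayed tensors, 2 decayed weight matrices
```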
AdriBenben/Scrib-AI | [
"bbafeeac3a0ddcf86996b65c4c416703c10fed91"
] | [
"AI/model.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Modifications Copyright 2017 Abigail See\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"This file contains code to build and run the tensorflow graph for the sequence-to-sequence model\"\"\"\n\nimport os\nimport time\nimport numpy as np\nimport tensorflow as tf\nfrom attention_decoder import attention_decoder\nfrom tensorflow.contrib.tensorboard.plugins import projector\n\nFLAGS = tf.app.flags.FLAGS\n\nclass SummarizationModel(object):\n \"\"\"A class to represent a sequence-to-sequence model for text summarization. Supports both baseline mode, pointer-generator mode, and coverage\"\"\"\n\n def __init__(self, hps, vocab):\n self._hps = hps\n self._vocab = vocab\n\n def _add_placeholders(self):\n \"\"\"Add placeholders to the graph. These are entry points for any input data.\"\"\"\n hps = self._hps\n\n # encoder part\n self._enc_batch = tf.placeholder(tf.int32, [hps.batch_size, None], name='enc_batch')\n self._enc_lens = tf.placeholder(tf.int32, [hps.batch_size], name='enc_lens')\n self._enc_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, None], name='enc_padding_mask')\n if FLAGS.pointer_gen:\n self._enc_batch_extend_vocab = tf.placeholder(tf.int32, [hps.batch_size, None], name='enc_batch_extend_vocab')\n self._max_art_oovs = tf.placeholder(tf.int32, [], name='max_art_oovs')\n\n # decoder part\n self._dec_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='dec_batch')\n self._target_batch = tf.placeholder(tf.int32, [hps.batch_size, hps.max_dec_steps], name='target_batch')\n self._dec_padding_mask = tf.placeholder(tf.float32, [hps.batch_size, hps.max_dec_steps], name='dec_padding_mask')\n\n if hps.mode==\"decode\" and hps.coverage:\n self.prev_coverage = tf.placeholder(tf.float32, [hps.batch_size, None], name='prev_coverage')\n\n\n def _make_feed_dict(self, batch, just_enc=False):\n \"\"\"Make a feed dictionary mapping parts of the batch to the appropriate placeholders.\n\n Args:\n batch: Batch object\n just_enc: Boolean. If True, only feed the parts needed for the encoder.\n \"\"\"\n feed_dict = {}\n feed_dict[self._enc_batch] = batch.enc_batch\n feed_dict[self._enc_lens] = batch.enc_lens\n feed_dict[self._enc_padding_mask] = batch.enc_padding_mask\n if FLAGS.pointer_gen:\n feed_dict[self._enc_batch_extend_vocab] = batch.enc_batch_extend_vocab\n feed_dict[self._max_art_oovs] = batch.max_art_oovs\n if not just_enc:\n feed_dict[self._dec_batch] = batch.dec_batch\n feed_dict[self._target_batch] = batch.target_batch\n feed_dict[self._dec_padding_mask] = batch.dec_padding_mask\n return feed_dict\n\n def _add_encoder(self, encoder_inputs, seq_len):\n \"\"\"Add a single-layer bidirectional LSTM encoder to the graph.\n\n Args:\n encoder_inputs: A tensor of shape [batch_size, <=max_enc_steps, emb_size].\n seq_len: Lengths of encoder_inputs (before padding). 
A tensor of shape [batch_size].\n\n Returns:\n encoder_outputs:\n A tensor of shape [batch_size, <=max_enc_steps, 2*hidden_dim]. It's 2*hidden_dim because it's the concatenation of the forwards and backwards states.\n fw_state, bw_state:\n Each are LSTMStateTuples of shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\n \"\"\"\n with tf.variable_scope('encoder'):\n cell_fw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n cell_bw = tf.contrib.rnn.LSTMCell(self._hps.hidden_dim, initializer=self.rand_unif_init, state_is_tuple=True)\n (encoder_outputs, (fw_st, bw_st)) = tf.nn.bidirectional_dynamic_rnn(cell_fw, cell_bw, encoder_inputs, dtype=tf.float32, sequence_length=seq_len, swap_memory=True)\n encoder_outputs = tf.concat(axis=2, values=encoder_outputs) # concatenate the forwards and backwards states\n return encoder_outputs, fw_st, bw_st\n\n\n def _reduce_states(self, fw_st, bw_st):\n \"\"\"Add to the graph a linear layer to reduce the encoder's final FW and BW state into a single initial state for the decoder. This is needed because the encoder is bidirectional but the decoder is not.\n\n Args:\n fw_st: LSTMStateTuple with hidden_dim units.\n bw_st: LSTMStateTuple with hidden_dim units.\n\n Returns:\n state: LSTMStateTuple with hidden_dim units.\n \"\"\"\n hidden_dim = self._hps.hidden_dim\n with tf.variable_scope('reduce_final_st'):\n\n # Define weights and biases to reduce the cell and reduce the state\n w_reduce_c = tf.get_variable('w_reduce_c', [hidden_dim * 2, hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n w_reduce_h = tf.get_variable('w_reduce_h', [hidden_dim * 2, hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n bias_reduce_c = tf.get_variable('bias_reduce_c', [hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n bias_reduce_h = tf.get_variable('bias_reduce_h', [hidden_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n\n # Apply linear layer\n old_c = tf.concat(axis=1, values=[fw_st.c, bw_st.c]) # Concatenation of fw and bw cell\n old_h = tf.concat(axis=1, values=[fw_st.h, bw_st.h]) # Concatenation of fw and bw state\n new_c = tf.nn.relu(tf.matmul(old_c, w_reduce_c) + bias_reduce_c) # Get new cell from old cell\n new_h = tf.nn.relu(tf.matmul(old_h, w_reduce_h) + bias_reduce_h) # Get new state from old state\n return tf.contrib.rnn.LSTMStateTuple(new_c, new_h) # Return new cell and state\n\n\n def _add_decoder(self, inputs):\n \"\"\"Add attention decoder to the graph. In train or eval mode, you call this once to get output on ALL steps. In decode (beam search) mode, you call this once for EACH decoder step.\n\n Args:\n inputs: inputs to the decoder (word embeddings). 
A list of tensors shape (batch_size, emb_dim)\n\n Returns:\n outputs: List of tensors; the outputs of the decoder\n out_state: The final state of the decoder\n attn_dists: A list of tensors; the attention distributions\n p_gens: A list of scalar tensors; the generation probabilities\n coverage: A tensor, the current coverage vector\n \"\"\"\n hps = self._hps\n cell = tf.contrib.rnn.LSTMCell(hps.hidden_dim, state_is_tuple=True, initializer=self.rand_unif_init)\n\n prev_coverage = self.prev_coverage if hps.mode==\"decode\" and hps.coverage else None # In decode mode, we run attention_decoder one step at a time and so need to pass in the previous step's coverage vector each time\n\n outputs, out_state, attn_dists, p_gens, coverage = attention_decoder(inputs, self._dec_in_state, self._enc_states, self._enc_padding_mask, cell, initial_state_attention=(hps.mode==\"decode\"), pointer_gen=hps.pointer_gen, use_coverage=hps.coverage, prev_coverage=prev_coverage)\n\n return outputs, out_state, attn_dists, p_gens, coverage\n\n def _calc_final_dist(self, vocab_dists, attn_dists):\n \"\"\"Calculate the final distribution, for the pointer-generator model\n\n Args:\n vocab_dists: The vocabulary distributions. List length max_dec_steps of (batch_size, vsize) arrays. The words are in the order they appear in the vocabulary file.\n attn_dists: The attention distributions. List length max_dec_steps of (batch_size, attn_len) arrays\n\n Returns:\n final_dists: The final distributions. List length max_dec_steps of (batch_size, extended_vsize) arrays.\n \"\"\"\n with tf.variable_scope('final_distribution'):\n # Multiply vocab dists by p_gen and attention dists by (1-p_gen)\n vocab_dists = [p_gen * dist for (p_gen,dist) in zip(self.p_gens, vocab_dists)]\n attn_dists = [(1-p_gen) * dist for (p_gen,dist) in zip(self.p_gens, attn_dists)]\n\n # Concatenate some zeros to each vocabulary dist, to hold the probabilities for in-article OOV words\n extended_vsize = self._vocab.size() + self._max_art_oovs # the maximum (over the batch) size of the extended vocabulary\n extra_zeros = tf.zeros((self._hps.batch_size, self._max_art_oovs))\n vocab_dists_extended = [tf.concat(axis=1, values=[dist, extra_zeros]) for dist in vocab_dists] # list length max_dec_steps of shape (batch_size, extended_vsize)\n\n # Project the values in the attention distributions onto the appropriate entries in the final distributions\n # This means that if a_i = 0.1 and the ith encoder word is w, and w has index 500 in the vocabulary, then we add 0.1 onto the 500th entry of the final distribution\n # This is done for each decoder timestep.\n # This is fiddly; we use tf.scatter_nd to do the projection\n batch_nums = tf.range(0, limit=self._hps.batch_size) # shape (batch_size)\n batch_nums = tf.expand_dims(batch_nums, 1) # shape (batch_size, 1)\n attn_len = tf.shape(self._enc_batch_extend_vocab)[1] # number of states we attend over\n batch_nums = tf.tile(batch_nums, [1, attn_len]) # shape (batch_size, attn_len)\n indices = tf.stack( (batch_nums, self._enc_batch_extend_vocab), axis=2) # shape (batch_size, enc_t, 2)\n shape = [self._hps.batch_size, extended_vsize]\n attn_dists_projected = [tf.scatter_nd(indices, copy_dist, shape) for copy_dist in attn_dists] # list length max_dec_steps (batch_size, extended_vsize)\n\n # Add the vocab distributions and the copy distributions together to get the final distributions\n # final_dists is a list length max_dec_steps; each entry is a tensor shape (batch_size, extended_vsize) giving the final distribution for that 
decoder timestep\n # Note that for decoder timesteps and examples corresponding to a [PAD] token, this is junk - ignore.\n final_dists = [vocab_dist + copy_dist for (vocab_dist,copy_dist) in zip(vocab_dists_extended, attn_dists_projected)]\n\n return final_dists\n\n def _add_emb_vis(self, embedding_var):\n \"\"\"Do setup so that we can view word embedding visualization in Tensorboard, as described here:\n https://www.tensorflow.org/get_started/embedding_viz\n Make the vocab metadata file, then make the projector config file pointing to it.\"\"\"\n train_dir = os.path.join(FLAGS.log_root, \"train\")\n vocab_metadata_path = os.path.join(train_dir, \"vocab_metadata.tsv\")\n self._vocab.write_metadata(vocab_metadata_path) # write metadata file\n summary_writer = tf.summary.FileWriter(train_dir)\n config = projector.ProjectorConfig()\n embedding = config.embeddings.add()\n embedding.tensor_name = embedding_var.name\n embedding.metadata_path = vocab_metadata_path\n projector.visualize_embeddings(summary_writer, config)\n\n def _add_seq2seq(self):\n \"\"\"Add the whole sequence-to-sequence model to the graph.\"\"\"\n hps = self._hps\n vsize = self._vocab.size() # size of the vocabulary\n\n with tf.variable_scope('seq2seq'):\n # Some initializers\n self.rand_unif_init = tf.random_uniform_initializer(-hps.rand_unif_init_mag, hps.rand_unif_init_mag, seed=123)\n self.trunc_norm_init = tf.truncated_normal_initializer(stddev=hps.trunc_norm_init_std)\n\n # Add embedding matrix (shared by the encoder and decoder inputs)\n with tf.variable_scope('embedding'):\n embedding = tf.get_variable('embedding', [vsize, hps.emb_dim], dtype=tf.float32, initializer=self.trunc_norm_init)\n if hps.mode==\"train\": self._add_emb_vis(embedding) # add to tensorboard\n emb_enc_inputs = tf.nn.embedding_lookup(embedding, self._enc_batch) # tensor with shape (batch_size, max_enc_steps, emb_size)\n emb_dec_inputs = [tf.nn.embedding_lookup(embedding, x) for x in tf.unstack(self._dec_batch, axis=1)] # list length max_dec_steps containing shape (batch_size, emb_size)\n\n # Add the encoder.\n enc_outputs, fw_st, bw_st = self._add_encoder(emb_enc_inputs, self._enc_lens)\n self._enc_states = enc_outputs\n\n # Our encoder is bidirectional and our decoder is unidirectional so we need to reduce the final encoder hidden state to the right size to be the initial decoder hidden state\n self._dec_in_state = self._reduce_states(fw_st, bw_st)\n\n # Add the decoder.\n with tf.variable_scope('decoder'):\n decoder_outputs, self._dec_out_state, self.attn_dists, self.p_gens, self.coverage = self._add_decoder(emb_dec_inputs)\n\n # Add the output projection to obtain the vocabulary distribution\n with tf.variable_scope('output_projection'):\n w = tf.get_variable('w', [hps.hidden_dim, vsize], dtype=tf.float32, initializer=self.trunc_norm_init)\n w_t = tf.transpose(w)\n v = tf.get_variable('v', [vsize], dtype=tf.float32, initializer=self.trunc_norm_init)\n vocab_scores = [] # vocab_scores is the vocabulary distribution before applying softmax. Each entry on the list corresponds to one decoder step\n for i,output in enumerate(decoder_outputs):\n if i > 0:\n tf.get_variable_scope().reuse_variables()\n vocab_scores.append(tf.nn.xw_plus_b(output, w, v)) # apply the linear layer\n\n vocab_dists = [tf.nn.softmax(s) for s in vocab_scores] # The vocabulary distributions. List length max_dec_steps of (batch_size, vsize) arrays. 
The words are in the order they appear in the vocabulary file.\n\n\n # For pointer-generator model, calc final distribution from copy distribution and vocabulary distribution\n if FLAGS.pointer_gen:\n final_dists = self._calc_final_dist(vocab_dists, self.attn_dists)\n else: # final distribution is just vocabulary distribution\n final_dists = vocab_dists\n\n\n\n if hps.mode in ['train', 'eval']:\n # Calculate the loss\n with tf.variable_scope('loss'):\n if FLAGS.pointer_gen:\n # Calculate the loss per step\n # This is fiddly; we use tf.gather_nd to pick out the probabilities of the gold target words\n loss_per_step = [] # will be list length max_dec_steps containing shape (batch_size)\n batch_nums = tf.range(0, limit=hps.batch_size) # shape (batch_size)\n for dec_step, dist in enumerate(final_dists):\n targets = self._target_batch[:,dec_step] # The indices of the target words. shape (batch_size)\n indices = tf.stack( (batch_nums, targets), axis=1) # shape (batch_size, 2)\n gold_probs = tf.gather_nd(dist, indices) # shape (batch_size). prob of correct words on this step\n losses = -tf.log(gold_probs)\n loss_per_step.append(losses)\n\n # Apply dec_padding_mask and get loss\n self._loss = _mask_and_avg(loss_per_step, self._dec_padding_mask)\n\n else: # baseline model\n self._loss = tf.contrib.seq2seq.sequence_loss(tf.stack(vocab_scores, axis=1), self._target_batch, self._dec_padding_mask) # this applies softmax internally\n\n tf.summary.scalar('loss', self._loss)\n\n # Calculate coverage loss from the attention distributions\n if hps.coverage:\n with tf.variable_scope('coverage_loss'):\n self._coverage_loss = _coverage_loss(self.attn_dists, self._dec_padding_mask)\n tf.summary.scalar('coverage_loss', self._coverage_loss)\n self._total_loss = self._loss + hps.cov_loss_wt * self._coverage_loss\n tf.summary.scalar('total_loss', self._total_loss)\n\n if hps.mode == \"decode\":\n # We run decode beam search mode one decoder step at a time\n assert len(final_dists)==1 # final_dists is a singleton list containing shape (batch_size, extended_vsize)\n final_dists = final_dists[0]\n topk_probs, self._topk_ids = tf.nn.top_k(final_dists, hps.batch_size*2) # take the k largest probs. note batch_size=beam_size in decode mode\n self._topk_log_probs = tf.log(topk_probs)\n\n\n def _add_train_op(self):\n \"\"\"Sets self._train_op, the op to run for training.\"\"\"\n # Take gradients of the trainable variables w.r.t. 
the loss function to minimize\n loss_to_minimize = self._total_loss if self._hps.coverage else self._loss\n tvars = tf.trainable_variables()\n gradients = tf.gradients(loss_to_minimize, tvars, aggregation_method=tf.AggregationMethod.EXPERIMENTAL_TREE)\n\n # Clip the gradients\n with tf.device(\"/gpu:0\"):\n grads, global_norm = tf.clip_by_global_norm(gradients, self._hps.max_grad_norm)\n\n # Add a summary\n tf.summary.scalar('global_norm', global_norm)\n\n # Apply adagrad optimizer\n optimizer = tf.train.AdagradOptimizer(self._hps.lr, initial_accumulator_value=self._hps.adagrad_init_acc)\n with tf.device(\"/gpu:0\"):\n self._train_op = optimizer.apply_gradients(zip(grads, tvars), global_step=self.global_step, name='train_step')\n\n\n def build_graph(self):\n \"\"\"Add the placeholders, model, global step, train_op and summaries to the graph\"\"\"\n tf.logging.info('Building graph...')\n t0 = time.time()\n self._add_placeholders()\n with tf.device(\"/gpu:0\"):\n self._add_seq2seq()\n self.global_step = tf.Variable(0, name='global_step', trainable=False)\n if self._hps.mode == 'train':\n self._add_train_op()\n self._summaries = tf.summary.merge_all()\n t1 = time.time()\n tf.logging.info('Time to build graph: %i seconds', t1 - t0)\n\n def run_train_step(self, sess, batch):\n \"\"\"Runs one training iteration. Returns a dictionary containing train op, summaries, loss, global_step and (optionally) coverage loss.\"\"\"\n feed_dict = self._make_feed_dict(batch)\n to_return = {\n 'train_op': self._train_op,\n 'summaries': self._summaries,\n 'loss': self._loss,\n 'global_step': self.global_step,\n }\n if self._hps.coverage:\n to_return['coverage_loss'] = self._coverage_loss\n return sess.run(to_return, feed_dict)\n\n def run_eval_step(self, sess, batch):\n \"\"\"Runs one evaluation iteration. Returns a dictionary containing summaries, loss, global_step and (optionally) coverage loss.\"\"\"\n feed_dict = self._make_feed_dict(batch)\n to_return = {\n 'summaries': self._summaries,\n 'loss': self._loss,\n 'global_step': self.global_step,\n }\n if self._hps.coverage:\n to_return['coverage_loss'] = self._coverage_loss\n return sess.run(to_return, feed_dict)\n\n def run_encoder(self, sess, batch):\n \"\"\"For beam search decoding. Run the encoder on the batch and return the encoder states and decoder initial state.\n\n Args:\n sess: Tensorflow session.\n batch: Batch object that is the same example repeated across the batch (for beam search)\n\n Returns:\n enc_states: The encoder states. A tensor of shape [batch_size, <=max_enc_steps, 2*hidden_dim].\n dec_in_state: A LSTMStateTuple of shape ([1,hidden_dim],[1,hidden_dim])\n \"\"\"\n feed_dict = self._make_feed_dict(batch, just_enc=True) # feed the batch into the placeholders\n (enc_states, dec_in_state, global_step) = sess.run([self._enc_states, self._dec_in_state, self.global_step], feed_dict) # run the encoder\n\n # dec_in_state is LSTMStateTuple shape ([batch_size,hidden_dim],[batch_size,hidden_dim])\n # Given that the batch is a single example repeated, dec_in_state is identical across the batch so we just take the top row.\n dec_in_state = tf.contrib.rnn.LSTMStateTuple(dec_in_state.c[0], dec_in_state.h[0])\n return enc_states, dec_in_state\n\n\n def decode_onestep(self, sess, batch, latest_tokens, enc_states, dec_init_states, prev_coverage):\n \"\"\"For beam search decoding. 
Run the decoder for one step.\n\n Args:\n sess: Tensorflow session.\n batch: Batch object containing single example repeated across the batch\n latest_tokens: Tokens to be fed as input into the decoder for this timestep\n enc_states: The encoder states.\n dec_init_states: List of beam_size LSTMStateTuples; the decoder states from the previous timestep\n prev_coverage: List of np arrays. The coverage vectors from the previous timestep. List of None if not using coverage.\n\n Returns:\n ids: top 2k ids. shape [beam_size, 2*beam_size]\n probs: top 2k log probabilities. shape [beam_size, 2*beam_size]\n new_states: new states of the decoder. a list length beam_size containing\n LSTMStateTuples each of shape ([hidden_dim,],[hidden_dim,])\n attn_dists: List length beam_size containing lists length attn_length.\n p_gens: Generation probabilities for this step. A list length beam_size. List of None if in baseline mode.\n new_coverage: Coverage vectors for this step. A list of arrays. List of None if coverage is not turned on.\n \"\"\"\n\n beam_size = len(dec_init_states)\n\n # Turn dec_init_states (a list of LSTMStateTuples) into a single LSTMStateTuple for the batch\n cells = [np.expand_dims(state.c, axis=0) for state in dec_init_states]\n hiddens = [np.expand_dims(state.h, axis=0) for state in dec_init_states]\n new_c = np.concatenate(cells, axis=0) # shape [batch_size,hidden_dim]\n new_h = np.concatenate(hiddens, axis=0) # shape [batch_size,hidden_dim]\n new_dec_in_state = tf.contrib.rnn.LSTMStateTuple(new_c, new_h)\n\n feed = {\n self._enc_states: enc_states,\n self._enc_padding_mask: batch.enc_padding_mask,\n self._dec_in_state: new_dec_in_state,\n self._dec_batch: np.transpose(np.array([latest_tokens])),\n }\n\n to_return = {\n \"ids\": self._topk_ids,\n \"probs\": self._topk_log_probs,\n \"states\": self._dec_out_state,\n \"attn_dists\": self.attn_dists\n }\n\n if FLAGS.pointer_gen:\n feed[self._enc_batch_extend_vocab] = batch.enc_batch_extend_vocab\n feed[self._max_art_oovs] = batch.max_art_oovs\n to_return['p_gens'] = self.p_gens\n\n if self._hps.coverage:\n feed[self.prev_coverage] = np.stack(prev_coverage, axis=0)\n to_return['coverage'] = self.coverage\n\n results = sess.run(to_return, feed_dict=feed) # run the decoder step\n\n # Convert results['states'] (a single LSTMStateTuple) into a list of LSTMStateTuple -- one for each hypothesis\n new_states = [tf.contrib.rnn.LSTMStateTuple(results['states'].c[i, :], results['states'].h[i, :]) for i in xrange(beam_size)]\n\n # Convert singleton list containing a tensor to a list of k arrays\n assert len(results['attn_dists'])==1\n attn_dists = results['attn_dists'][0].tolist()\n\n if FLAGS.pointer_gen:\n # Convert singleton list containing a tensor to a list of k arrays\n assert len(results['p_gens'])==1\n p_gens = results['p_gens'][0].tolist()\n else:\n p_gens = [None for _ in xrange(beam_size)]\n\n # Convert the coverage tensor to a list length k containing the coverage vector for each hypothesis\n if FLAGS.coverage:\n new_coverage = results['coverage'].tolist()\n assert len(new_coverage) == beam_size\n else:\n new_coverage = [None for _ in xrange(beam_size)]\n\n return results['ids'], results['probs'], new_states, attn_dists, p_gens, new_coverage\n\n\ndef _mask_and_avg(values, padding_mask):\n \"\"\"Applies mask to values then returns overall average (a scalar)\n\n Args:\n values: a list length max_dec_steps containing arrays shape (batch_size).\n padding_mask: tensor shape (batch_size, max_dec_steps) containing 1s and 0s.\n\n Returns:\n a 
scalar\n \"\"\"\n\n dec_lens = tf.reduce_sum(padding_mask, axis=1) # shape batch_size. float32\n values_per_step = [v * padding_mask[:,dec_step] for dec_step,v in enumerate(values)]\n values_per_ex = sum(values_per_step)/dec_lens # shape (batch_size); normalized value for each batch member\n return tf.reduce_mean(values_per_ex) # overall average\n\n\ndef _coverage_loss(attn_dists, padding_mask):\n \"\"\"Calculates the coverage loss from the attention distributions.\n\n Args:\n attn_dists: The attention distributions for each decoder timestep. A list length max_dec_steps containing shape (batch_size, attn_length)\n padding_mask: shape (batch_size, max_dec_steps).\n\n Returns:\n coverage_loss: scalar\n \"\"\"\n coverage = tf.zeros_like(attn_dists[0]) # shape (batch_size, attn_length). Initial coverage is zero.\n covlosses = [] # Coverage loss per decoder timestep. Will be list length max_dec_steps containing shape (batch_size).\n for a in attn_dists:\n covloss = tf.reduce_sum(tf.minimum(a, coverage), [1]) # calculate the coverage loss for this step\n covlosses.append(covloss)\n coverage += a # update the coverage vector\n coverage_loss = _mask_and_avg(covlosses, padding_mask)\n return coverage_loss\n"
] | [
[
"tensorflow.get_variable",
"tensorflow.device",
"tensorflow.concat",
"numpy.expand_dims",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.stack",
"tensorflow.minimum",
"tensorflow.nn.bidirectional_dynamic_rnn",
"numpy.concatenate",
"tensorflow.summary.scalar",
"tensorflow.Variable",
"tensorflow.random_uniform_initializer",
"tensorflow.gradients",
"tensorflow.truncated_normal_initializer",
"numpy.stack",
"tensorflow.nn.top_k",
"tensorflow.trainable_variables",
"tensorflow.tile",
"tensorflow.nn.xw_plus_b",
"tensorflow.train.AdagradOptimizer",
"tensorflow.matmul",
"tensorflow.gather_nd",
"tensorflow.unstack",
"tensorflow.shape",
"tensorflow.placeholder",
"tensorflow.scatter_nd",
"tensorflow.zeros_like",
"tensorflow.contrib.rnn.LSTMStateTuple",
"tensorflow.logging.info",
"tensorflow.summary.merge_all",
"tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig",
"numpy.array",
"tensorflow.nn.embedding_lookup",
"tensorflow.nn.softmax",
"tensorflow.summary.FileWriter",
"tensorflow.transpose",
"tensorflow.range",
"tensorflow.reduce_mean",
"tensorflow.expand_dims",
"tensorflow.clip_by_global_norm",
"tensorflow.contrib.rnn.LSTMCell",
"tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings",
"tensorflow.log",
"tensorflow.variable_scope",
"tensorflow.get_variable_scope"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
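The summarization model above mixes the generator's vocabulary distribution with a copy distribution over source positions: the vocabulary part is scaled by p_gen, the attention part by (1 - p_gen), and tf.scatter_nd adds the attention mass onto the extended-vocabulary ids of the source words. A numpy re-expression of that single step, with made-up numbers (batch of 2, vocabulary of 5 plus one in-article OOV); this is only an illustration of the arithmetic, not the TensorFlow graph code:

```python
import numpy as np

max_art_oovs = 1                                 # extended vocabulary size = 5 + 1 = 6
p_gen = np.array([[0.7], [0.4]])                 # generation probability per example
vocab_dist = np.array([[.1, .2, .3, .2, .2],
                       [.5, .1, .1, .2, .1]])
attn_dist = np.array([[.6, .3, .1],
                      [.2, .2, .6]])
enc_batch_extend_vocab = np.array([[2, 4, 5],    # extended-vocab ids of the 3 source tokens
                                   [0, 2, 5]])   # id 5 is the in-article OOV slot

final_dist = np.concatenate([p_gen * vocab_dist, np.zeros((2, max_art_oovs))], axis=1)
copy_dist = (1.0 - p_gen) * attn_dist
for b in range(final_dist.shape[0]):             # what tf.scatter_nd plus addition do in the graph
    np.add.at(final_dist[b], enc_batch_extend_vocab[b], copy_dist[b])

print(final_dist.sum(axis=1))                    # [1. 1.] -- still proper distributions
```

Repeated source ids accumulate their attention mass on the same entry, so a word that occurs twice in the article receives the summed copy probability.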
magictron/NeMo | [
"aae9e3405b6e78dff864d21ea59331abdadc0217"
] | [
"nemo/collections/nlp/data/datasets/punctuation_capitalization_dataset.py"
] | [
"# =============================================================================\n# Copyright 2020 NVIDIA. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# =============================================================================\n\n__all__ = ['BertPunctuationCapitalizationDataset', 'BertPunctuationCapitalizationInferDataset']\n\nimport itertools\nimport os\nimport pickle\n\nimport numpy as np\nfrom torch.utils.data import Dataset\n\nfrom nemo import logging\nfrom nemo.collections.nlp.data.datasets.datasets_utils import get_label_stats, get_stats\n\n\ndef get_features(\n queries,\n max_seq_length,\n tokenizer,\n punct_label_ids=None,\n capit_label_ids=None,\n pad_label='O',\n punct_labels_lines=None,\n capit_labels_lines=None,\n ignore_extra_tokens=False,\n ignore_start_end=False,\n):\n \"\"\"\n Args:\n queries (list of str): text sequences\n max_seq_length (int): max sequence length minus 2 for [CLS] and [SEP]\n tokenizer (TokenizerSpec): such as NemoBertTokenizer\n pad_label (str): pad value use for labels.\n by default, it's the neutral label.\n punct_label_ids (dict): dict to map punctuation labels to label ids.\n Starts with pad_label->0 and then increases in alphabetical order.\n Required for training and evaluation, not needed for inference.\n capit_label_ids (dict): dict to map labels to label ids. 
Starts\n with pad_label->0 and then increases in alphabetical order.\n Required for training and evaluation, not needed for inference.\n punct_labels (list of str): list of labels for every word in a sequence\n capit_labels (list of str): list of labels for every word in a sequence\n ignore_extra_tokens (bool): whether to ignore extra tokens in\n the loss_mask,\n ignore_start_end (bool): whether to ignore bos and eos tokens in\n the loss_mask\n \"\"\"\n all_subtokens = []\n all_loss_mask = []\n all_subtokens_mask = []\n all_segment_ids = []\n all_input_ids = []\n all_input_mask = []\n sent_lengths = []\n punct_all_labels = []\n capit_all_labels = []\n with_label = False\n\n if punct_labels_lines and capit_labels_lines:\n with_label = True\n\n for i, query in enumerate(queries):\n words = query.strip().split()\n\n # add bos token\n subtokens = [tokenizer.cls_token]\n loss_mask = [1 - ignore_start_end]\n subtokens_mask = [0]\n if with_label:\n pad_id = punct_label_ids[pad_label]\n punct_labels = [pad_id]\n punct_query_labels = [punct_label_ids[lab] for lab in punct_labels_lines[i]]\n\n capit_labels = [pad_id]\n capit_query_labels = [capit_label_ids[lab] for lab in capit_labels_lines[i]]\n\n for j, word in enumerate(words):\n word_tokens = tokenizer.text_to_tokens(word)\n subtokens.extend(word_tokens)\n\n loss_mask.append(1)\n loss_mask.extend([int(not ignore_extra_tokens)] * (len(word_tokens) - 1))\n\n subtokens_mask.append(1)\n subtokens_mask.extend([0] * (len(word_tokens) - 1))\n\n if with_label:\n punct_labels.extend([punct_query_labels[j]] * len(word_tokens))\n capit_labels.extend([capit_query_labels[j]] * len(word_tokens))\n\n # add eos token\n subtokens.append(tokenizer.sep_token)\n loss_mask.append(1 - ignore_start_end)\n subtokens_mask.append(0)\n sent_lengths.append(len(subtokens))\n all_subtokens.append(subtokens)\n all_loss_mask.append(loss_mask)\n all_subtokens_mask.append(subtokens_mask)\n all_input_mask.append([1] * len(subtokens))\n\n if with_label:\n punct_labels.append(pad_id)\n punct_all_labels.append(punct_labels)\n capit_labels.append(pad_id)\n capit_all_labels.append(capit_labels)\n\n max_seq_length = min(max_seq_length, max(sent_lengths))\n logging.info(f'Max length: {max_seq_length}')\n get_stats(sent_lengths)\n too_long_count = 0\n\n for i, subtokens in enumerate(all_subtokens):\n if len(subtokens) > max_seq_length:\n subtokens = [tokenizer.cls_token] + subtokens[-max_seq_length + 1 :]\n all_input_mask[i] = [1] + all_input_mask[i][-max_seq_length + 1 :]\n all_loss_mask[i] = [int(not ignore_start_end)] + all_loss_mask[i][-max_seq_length + 1 :]\n all_subtokens_mask[i] = [0] + all_subtokens_mask[i][-max_seq_length + 1 :]\n\n if with_label:\n punct_all_labels[i] = [pad_id] + punct_all_labels[i][-max_seq_length + 1 :]\n capit_all_labels[i] = [pad_id] + capit_all_labels[i][-max_seq_length + 1 :]\n too_long_count += 1\n\n all_input_ids.append([tokenizer.tokens_to_ids(t) for t in subtokens])\n\n if len(subtokens) < max_seq_length:\n extra = max_seq_length - len(subtokens)\n all_input_ids[i] = all_input_ids[i] + [0] * extra\n all_loss_mask[i] = all_loss_mask[i] + [0] * extra\n all_subtokens_mask[i] = all_subtokens_mask[i] + [0] * extra\n all_input_mask[i] = all_input_mask[i] + [0] * extra\n\n if with_label:\n punct_all_labels[i] = punct_all_labels[i] + [pad_id] * extra\n capit_all_labels[i] = capit_all_labels[i] + [pad_id] * extra\n\n all_segment_ids.append([0] * max_seq_length)\n\n logging.info(f'{too_long_count} are longer than {max_seq_length}')\n\n for i in 
range(min(len(all_input_ids), 5)):\n logging.info(\"*** Example ***\")\n logging.info(\"i: %s\" % (i))\n logging.info(\"subtokens: %s\" % \" \".join(list(map(str, all_subtokens[i]))))\n logging.info(\"loss_mask: %s\" % \" \".join(list(map(str, all_loss_mask[i]))))\n logging.info(\"input_mask: %s\" % \" \".join(list(map(str, all_input_mask[i]))))\n logging.info(\"subtokens_mask: %s\" % \" \".join(list(map(str, all_subtokens_mask[i]))))\n if with_label:\n logging.info(\"punct_labels: %s\" % \" \".join(list(map(str, punct_all_labels[i]))))\n logging.info(\"capit_labels: %s\" % \" \".join(list(map(str, capit_all_labels[i]))))\n\n return (\n all_input_ids,\n all_segment_ids,\n all_input_mask,\n all_loss_mask,\n all_subtokens_mask,\n punct_all_labels,\n capit_all_labels,\n punct_label_ids,\n capit_label_ids,\n )\n\n\nclass BertPunctuationCapitalizationDataset(Dataset):\n \"\"\"\n Creates dataset to use during training for token classification\n tasks with a pretrained model.\n\n Converts from raw data to an instance that can be used by\n NMDataLayer.\n\n For dataset to use during inference without labels, see\n BertPunctuationCapitalizationInferDataset.\n\n Args:\n text_file (str): file to sequences, each line should a sentence,\n No header.\n label_file (str): file to labels, each line corresponds to\n word labels for a sentence in the text_file. No header.\n max_seq_length (int): max sequence length minus 2 for [CLS] and [SEP]\n tokenizer (Tokenizer): such as NemoBertTokenizer\n num_samples (int): number of samples you want to use for the dataset.\n If -1, use all dataset. Useful for testing.\n pad_label (str): pad value use for labels.\n by default, it's the neutral label.\n punct_label_ids and capit_label_ids (dict):\n dict to map labels to label ids.\n Starts with pad_label->0 and then increases in alphabetical order\n For dev set use label_ids generated during training to support\n cases when not all labels are present in the dev set.\n For training set label_ids should be None.\n ignore_extra_tokens (bool): whether to ignore extra tokens in\n the loss_mask,\n ignore_start_end (bool): whether to ignore bos and eos tokens in\n the loss_mask\n \"\"\"\n\n def __init__(\n self,\n text_file,\n label_file,\n max_seq_length,\n tokenizer,\n num_samples=-1,\n pad_label='O',\n punct_label_ids=None,\n capit_label_ids=None,\n ignore_extra_tokens=False,\n ignore_start_end=False,\n use_cache=False,\n ):\n\n if use_cache:\n # Cache features\n data_dir = os.path.dirname(text_file)\n filename = os.path.basename(text_file)\n\n if not filename.endswith('.txt'):\n raise ValueError(\"{text_file} should have extension .txt\")\n\n filename = filename[:-4]\n tokenizer_type = type(tokenizer.tokenizer).__name__\n vocab_size = getattr(tokenizer, \"vocab_size\", 0)\n features_pkl = os.path.join(\n data_dir, \"cached_{}_{}_{}_{}\".format(filename, tokenizer_type, str(max_seq_length), str(vocab_size)),\n )\n\n if use_cache and os.path.exists(features_pkl):\n # If text_file was already processed, load from pickle\n features = pickle.load(open(features_pkl, 'rb'))\n logging.info(f'features restored from {features_pkl}')\n else:\n if num_samples == 0:\n raise ValueError(\"num_samples has to be positive\", num_samples)\n\n with open(text_file, 'r') as f:\n text_lines = f.readlines()\n\n # Collect all possible labels\n punct_unique_labels = set([])\n capit_unique_labels = set([])\n punct_labels_lines = []\n capit_labels_lines = []\n with open(label_file, 'r') as f:\n for line in f:\n line = line.strip().split()\n\n # 
extract punctuation and capitalization labels\n punct_line, capit_line = zip(*line)\n punct_labels_lines.append(punct_line)\n capit_labels_lines.append(capit_line)\n\n punct_unique_labels.update(punct_line)\n capit_unique_labels.update(capit_line)\n\n if len(punct_labels_lines) != len(text_lines):\n raise ValueError(\"Labels file should contain labels for every word\")\n\n dataset = list(zip(text_lines, punct_labels_lines, capit_labels_lines))\n\n if num_samples > 0:\n dataset = dataset[:num_samples]\n\n dataset = list(zip(*dataset))\n text_lines = dataset[0]\n punct_labels_lines = dataset[1]\n capit_labels_lines = dataset[2]\n\n # for dev/test sets use label mapping from training set\n if punct_label_ids:\n if len(punct_label_ids) != len(punct_unique_labels):\n logging.info(\n 'Not all labels from the specified'\n + 'label_ids dictionary are present in the'\n + 'current dataset. Using the provided'\n + 'label_ids dictionary.'\n )\n else:\n logging.info('Using the provided label_ids dictionary.')\n else:\n logging.info(\n 'Creating a new label to label_id dictionary.'\n + ' It\\'s recommended to use label_ids generated'\n + ' during training for dev/test sets to avoid'\n + ' errors if some labels are not'\n + ' present in the dev/test sets.'\n + ' For training set label_ids should be None.'\n )\n\n def create_label_ids(unique_labels, pad_label=pad_label):\n label_ids = {pad_label: 0}\n if pad_label in unique_labels:\n unique_labels.remove(pad_label)\n for label in sorted(unique_labels):\n label_ids[label] = len(label_ids)\n return label_ids\n\n punct_label_ids = create_label_ids(punct_unique_labels)\n capit_label_ids = create_label_ids(capit_unique_labels)\n\n features = get_features(\n text_lines,\n max_seq_length,\n tokenizer,\n pad_label=pad_label,\n punct_labels_lines=punct_labels_lines,\n capit_labels_lines=capit_labels_lines,\n punct_label_ids=punct_label_ids,\n capit_label_ids=capit_label_ids,\n ignore_extra_tokens=ignore_extra_tokens,\n ignore_start_end=ignore_start_end,\n )\n\n if use_cache:\n pickle.dump(features, open(features_pkl, \"wb\"))\n logging.info(f'features saved to {features_pkl}')\n\n self.all_input_ids = features[0]\n self.all_segment_ids = features[1]\n self.all_input_mask = features[2]\n self.all_loss_mask = features[3]\n self.all_subtokens_mask = features[4]\n self.punct_all_labels = features[5]\n self.capit_all_labels = features[6]\n self.punct_label_ids = features[7]\n self.capit_label_ids = features[8]\n\n # save label_ids\n def get_stats_and_save(all_labels, label_ids, name):\n infold = text_file[: text_file.rfind('/')]\n merged_labels = itertools.chain.from_iterable(all_labels)\n logging.info('Three most popular labels')\n _, label_frequencies = get_label_stats(merged_labels, infold + '/label_count_' + name + '.tsv')\n\n out = open(os.path.join(infold, name + '_label_ids.csv'), 'w')\n labels, _ = zip(*sorted(label_ids.items(), key=lambda x: x[1]))\n out.write('\\n'.join(labels))\n logging.info(f'Labels: {label_ids}')\n logging.info(f'Labels mapping saved to : {out.name}')\n\n return label_frequencies\n\n self.punct_label_frequencies = get_stats_and_save(self.punct_all_labels, self.punct_label_ids, 'punct')\n self.capit_label_frequencies = get_stats_and_save(self.capit_all_labels, self.capit_label_ids, 'capit')\n\n def __len__(self):\n return len(self.all_input_ids)\n\n def __getitem__(self, idx):\n return (\n np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n np.array(self.all_input_mask[idx], dtype=np.long),\n 
np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n np.array(self.punct_all_labels[idx]),\n np.array(self.capit_all_labels[idx]),\n )\n\n\nclass BertPunctuationCapitalizationInferDataset(Dataset):\n \"\"\"\n Creates dataset to use during inference for token classification\n tasks with a pretrained model.\n\n Converts from raw data to an instance that can be used by\n NMDataLayer.\n\n For dataset to use during training with labels, see\n BertPunctuationCapitalizationDataset.\n\n Args:\n queries (list): list of queries to run inference on\n max_seq_length (int): max sequence length minus 2 for [CLS] and [SEP]\n tokenizer (Tokenizer): such as NemoBertTokenizer\n \"\"\"\n\n def __init__(self, queries, max_seq_length, tokenizer):\n features = get_features(queries, max_seq_length, tokenizer)\n\n self.all_input_ids = features[0]\n self.all_segment_ids = features[1]\n self.all_input_mask = features[2]\n self.all_loss_mask = features[3]\n self.all_subtokens_mask = features[4]\n\n def __len__(self):\n return len(self.all_input_ids)\n\n def __getitem__(self, idx):\n return (\n np.array(self.all_input_ids[idx]),\n np.array(self.all_segment_ids[idx]),\n np.array(self.all_input_mask[idx], dtype=np.float32),\n np.array(self.all_loss_mask[idx]),\n np.array(self.all_subtokens_mask[idx]),\n )\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
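The punctuation/capitalization dataset above builds its label maps with a small nested helper: the pad label always gets id 0 and the remaining labels follow in sorted order, so dev and test runs stay consistent as long as the training-time dictionary is reused. A standalone sketch of that helper; the example label sets ('O', ',', '.', '?' for punctuation and 'O', 'U' for capitalization) are assumptions for illustration:

```python
def create_label_ids(unique_labels, pad_label='O'):
    # Pad label is always id 0; the rest are added in sorted order, as in the record above.
    label_ids = {pad_label: 0}
    for label in sorted(l for l in unique_labels if l != pad_label):
        label_ids[label] = len(label_ids)
    return label_ids

punct_label_ids = create_label_ids({'O', ',', '.', '?'})
capit_label_ids = create_label_ids({'O', 'U'})
print(punct_label_ids)   # {'O': 0, ',': 1, '.': 2, '?': 3}
print(capit_label_ids)   # {'O': 0, 'U': 1}
```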
craigmax-dev/Mixed-Integer-Linear-Programming-for-Spacecraft-Maneuvers | [
"d14bda5f0dca2bf6b7e25b58e36d10e096b7612f"
] | [
"lib/python/tests/PAPER_03a.py"
] | [
"# # -----------------------------------------------------------------------------\n# # PAPER_03\n# # -----------------------------------------------------------------------------\n# # ISS Remote Camera \n\n# # --------------\n# # Imports\n# # --------------\n# import numpy as np\n\n# # --------------\n# # Model Settings\n# # --------------\n# # Problem File title\n# title = \"Paper Simulation 3: ISS Remote Camera \"\n# # Define active constraints\n# activeConstraints = { \n# \"basic\":True, \n# \"obstacleAvoidance\":False, \n# \"collisionAvoidance\":False, \n# \"plumeAvoidanceVehicle\":False,\n# \"plumeAvoidanceObstacle\":False,\n# \"finalConfigurationSelection\":False}\n# # Define outputs\n# outputs = {\n# \"dataframe\":True,\n# \"paths\":True,\n# \"inputVector\":True,\n# \"separation\":False,\n# \"minSeparation\":False,\n# \"saveFig\":True}\n# # Define colour scheme\n# colourScheme = \"paletteDark6\"\n# # Axis limits\n# # axLims = np.array([-20, 30, -10, 40, -20, 20])\n# axLims = np.array([0, 100, 0, 100, 0, 100])\n\n# # ---------------\n# # Model Variables\n# # ---------------\n# N = 3 # Number of dimensions\n# T = 11 # Number of time steps\n# T_end = 100 # End time (s)\n# V = 1 # Number of vehicles\n# P = 10 # Plume Length (m)\n# W = 1 # Plume Width (m)\n# M = 1000000 # Big M Method\n# m = np.array([5]) # Mass of satellites (kg)\n# minApproachDist = 1 # Min displacement between satellie and any obstacle in each dimension (m)\n# omega = 0 # (s)\n# dynamicsModel = \"freeSpace\" # Define dynamics model - \"hills\" or \"freeSpace\"\n\n# # Initial state vector\n# # x_ini = np.array([[-2, 12, 0, 0, 0, 0]]) # x_i = 0, v_i = 0\n# x_ini = np.array([[6, 0, 0, 0, 0, 0]]) # x_i = 0, v_i = 0\n# # Final state vector\n# # x_fin = np.array([[3, 17, 4, 0, 0, 0]]) # x_i = 1, v_i = 0\n# x_fin = np.array([[8, 0, 0, 0, 0, 0]]) # x_i = 1, v_i = 0\n# # State vector limits\n# x_lim = 100*np.ones([V, 2*N])\n# # Input vector limits\n# u_lim = 1e-6*np.ones([V, N]) # Thrust (N)\n# # Objects - only work in 2D+\n# # objects = np.array([\n# # [0, 5, 0, 30, 0, 5], \n# # [0, 20, 14, 15, 2, 3], \n# # [-10, 0, 13, 16, 1, 4], \n# # [2, 3, 5, 10, -10, 10]]) #\n# objects = np.array([]) #\n# # Safety distance\n# r = 1*np.ones([N])\n\n# -----------------------------------------------------------------------------\n# INTEGRATION_TEST_06\n# -----------------------------------------------------------------------------\n\n# --------------\n# Imports\n# --------------\nimport numpy as np\n\n# --------------\n# Model Settings\n# --------------\n# Problem File title\ntitle = \"Integration Test 6: Plume Avoidance for Vehicles\"\n# Define active constraints\nactiveConstraints = { \n \"basic\":True, \n \"obstacleAvoidance\":True, \n \"collisionAvoidance\":False, \n \"plumeAvoidanceVehicle\":True,\n \"plumeAvoidanceObstacle\":False,\n \"finalConfigurationSelection\":False}\n# Define dynamics model\ndynamicsModel = \"freeSpace\" # Define dynamics model - \"hills\" or \"freeSpace\"\n# Define outputs\noutputs = {\n \"dataframe\":True,\n \"paths\":True,\n \"inputVector\":True,\n \"separation\":False,\n \"minSeparation\":False,\n \"saveFig\":True}\n# Define colour scheme\ncolourScheme = \"paletteDark6\"\n# Axis limits\naxLims = np.array([-10, 20, 0, 30, -5, 10])\n\n# ---------------\n# Model Variables\n# ---------------\nN = 3 # Number of dimensions\nT = 41 # Number of time steps\nT_end = 1000 # End time (s)\nV = 1 # Number of vehicles\nP = 10 # Plume Length (m)\nW = 1 # Plume Width (m)\nM = 1000000 # Big M Method\nm = np.array([5]) # Mass 
of satellites (kg)\nminApproachDist = 1 # Min displacement between satellie and any obstacle in each dimension (m)\nomega = 0 # (s)\ndynamicsModel = \"freeSpace\" # Define dynamics model - \"hills\" or \"freeSpace\"\n\n# Initial state vector\nx_ini = np.array([[-2, 12, 2, 0, 0, 0]]) # x_i = 0, v_i = 0\n# Final state vector\nx_fin = np.array([[7, 5, 2, 0, 0, 0]]) # x_i = 1, v_i = 0\n# State vector limits\nx_lim = 100*np.ones([V, 2*N])\n# Input vector limits\nu_lim = 1*np.ones([V, N]) # Thrust (N)\n# Objects - only work in 2D+\nobjects = np.array([\n [0, 5, 0, 30, 0, 5], \n [0, 20, 14, 15, 2, 3], \n [-10, 0, 13, 16, 1, 4], \n [2, 3, 5, 10, -5, 10]]) #\n# Safety distance\nr = 1*np.ones([N])"
] | [
[
"numpy.array",
"numpy.ones"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DeercoderPractice/python | [
"4a32cc8922f47baea390e8167e34f185f67ae0fd"
] | [
"cnn_python_tutorial/numpy/subplot.py"
] | [
"#!/usr/bin/env python\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Compute the x and y coordinates for points on sine and cosine curves\nx = np.arange(0, 3 * np.pi, 0.1)\ny_sin = np.sin(x)\ny_cos = np.cos(x)\n\n# Set up a subplot grid that has height 2 and width 1,\n# and set the first such subplot as active.\nplt.subplot(2, 1, 1)\n\n# Make the first plot\nplt.plot(x, y_sin)\nplt.title('Sine')\n\n# Set the second subplot as active, and make the second plot.\nplt.subplot(2, 1, 2)\nplt.plot(x, y_cos)\nplt.title('Cosine')\n\n# Show the figure.\nplt.show()\n"
] | [
[
"matplotlib.pyplot.title",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gaxler/dataset_agnostic_segmentation | [
"7dd8e68c735c2602ac9bf1c199960f530e9848bc"
] | [
"statistics/segmentation.py"
] | [
"\nimport numpy as np\n\nfrom lib.bbox import bbox_overlaps\n\nfrom utils import get_coverage_mapping, save_stat_page\nfrom lib.show_images import debugShowBoxes\n\n\ndef update_segmentation_stats(meta_images, doc_images, gt_boxes, params, pred_boxes, binary_icdar=True, viz=False, save_path=None, test_phase=False, untrim_icdar=0.1):\n \"\"\"\n \"\"\"\n batch_size = doc_images.shape[0]\n\n batch_imgs = []\n for n in range(batch_size):\n bidx = np.where(pred_boxes[:, 0] == n)[0]\n predictions = pred_boxes[bidx, 1:]\n meta = meta_images[n]\n gtidx = np.where(gt_boxes[:, 0] == n)[0]\n gt_boxes = gt_boxes[gtidx, 1:]\n\n # If ICDAR is being evaluated use fg pixel IoU calc and untrim boxes\n use_pixel_level = 'icdar' in str(meta.path) and binary_icdar\n predictions = untrim_boxes(predictions, trim=untrim_icdar) if use_pixel_level else predictions\n\n if test_phase and save_path is not None:\n pred_stats, pred_img = page_eval(doc_images[n, :], predictions, gt_boxes, use_pixel_level=use_pixel_level, output_all=params.output_all)\n save_stat_page(name=meta.path, pred_img=pred_img, pred_stat=pred_stats, save_path=save_path)\n\n if viz:\n viz_img = doc_images[n, :].copy()\n box_viz_img = debugShowBoxes(viz_img, boxes=predictions, gt_boxes=gt_boxes, wait=0, dont_show=True)\n batch_imgs.append(box_viz_img[np.newaxis, :])\n\n box_viz_img = np.vstack(batch_imgs) if len(batch_imgs) > 0 else None\n return box_viz_img\n\n\ndef page_eval(page_image, pred_boxes, gt_boxes, use_pixel_level=True, output_all=False):\n page_stats = {}\n overlaps = bbox_overlaps(gt_boxes.astype(np.float32), pred_boxes.astype(np.float32))\n gt_to_pred_map = get_coverage_mapping(overlaps.T)\n inv_page_binary = _inverse_binary(page_image, thresh=0.99)\n\n output_titles = []\n output_boxes = []\n # Check each gt_box\n for ind in range(gt_boxes.shape[0]):\n word_stats = {}\n gt_box = gt_boxes[ind, :]\n pred_ind = gt_to_pred_map.get(ind, None)\n if pred_ind is None:\n continue\n pred_box = pred_boxes[pred_ind, :]\n if use_pixel_level:\n o2o = pixel_iou(gt_box=gt_box, box=pred_box, binary_image=inv_page_binary)\n else:\n o2o = overlaps[ind, pred_ind]\n output_boxes.append(pred_box)\n output_titles.append('%4.3f' % o2o)\n word_stats['gt'] = gt_box.tolist()\n word_stats['pred'] = pred_box.tolist()\n word_stats['cover'] = o2o\n page_stats['word_%d' % ind] = word_stats\n\n if output_all:\n for ind in range(pred_boxes.shape[0]):\n pred_box = pred_boxes[ind, :]\n output_boxes.append(pred_box)\n output_titles.append('-')\n word_stats['pred'] = pred_box.tolist()\n page_stats['box_%d' % ind] = word_stats\n\n page_stats['predictions'] = pred_boxes.shape[0]\n page_stats['gt_boxes'] = gt_boxes.shape[0]\n preds_image = debugShowBoxes(page_image.copy(), boxes=output_boxes, gt_boxes=gt_boxes, titles=output_titles, dont_show=True)\n\n return page_stats, preds_image\n\n\ndef untrim_boxes(boxes, trim):\n box_dim = 1*(boxes.shape[1] == 5)\n new_boxes = []\n for i, box in enumerate(boxes):\n dx = int((box[box_dim + 2] - box[box_dim + 0]) * trim)\n box[box_dim + 0] -= dx\n box[box_dim + 2] += dx\n dy = int((box[box_dim + 3] - box[box_dim + 1]) * trim)\n box[box_dim + 1] -= dy\n box[box_dim + 3] += dy\n new_boxes.append(box)\n return np.vstack(new_boxes)\n\n\ndef train_viz(batch, rnd_boxes, rnd_labels, phoc_lab_thresh=0, unnormalize=None):\n rnd_images = []\n for i in range(batch['image'].shape[0]):\n idx = np.where(rnd_boxes[:, 0] == i)[0]\n batch_boxes = rnd_boxes[idx, :]\n gt_idx = np.where(batch['gt_boxes'][:, 0] == i)[0]\n batch_gt_boxes = 
batch['gt_boxes'][gt_idx, :]\n batch_labels = rnd_labels[idx]\n good_boxes = batch_boxes[np.where(batch_labels >= phoc_lab_thresh)[0], :]\n input_img = batch['image'].copy()[i, :]\n if unnormalize is not None:\n input_img = input_img*unnormalize\n rnd_image = debugShowBoxes(input_img, boxes=good_boxes[:, 1:], gt_boxes=batch_gt_boxes[:, 1:], dont_show=True)\n rnd_images.append(rnd_image[np.newaxis, :])\n return np.vstack(rnd_images)\n\n\ndef pixel_iou(gt_box, box, binary_image):\n gt_box = gt_box.astype(np.int32)\n # Number of black pixels in gt_box\n gt_pixels = np.sum(binary_image[gt_box[1]:gt_box[3], gt_box[0]:gt_box[2]])\n # Calculate intersection box between gt and prediction\n int_box = np.array(_intersection_box(gt_box, box), dtype=np.int32)\n # Number of black pixels in intersection box\n intersect_pixels = np.sum(binary_image[int_box[1]:int_box[3], int_box[0]:int_box[2]]).astype(np.float32)\n # rate of pixels covered by prediction\n o2o = intersect_pixels / gt_pixels\n return o2o\n\n\ndef _inverse_binary(binary_image, thresh=0.5, scale=255.):\n \"\"\" Turn binary picture so that black pixels will be valued 1. and white at 0.\"\"\"\n x = np.sum(binary_image, axis=2)\n sliced_image = x / (scale*binary_image.shape[2])\n inv_binary = (np.ones_like(sliced_image) - sliced_image)\n inverse_img = (inv_binary > thresh)*1\n return inverse_img\n\n\ndef _intersection_box(box1, box2):\n return max(box1[0], box2[0]), \\\n max(box1[1], box2[1]), \\\n min(box1[2], box2[2]), \\\n min(box1[3], box2[3])\n"
] | [
[
"numpy.where",
"numpy.ones_like",
"numpy.sum",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wendili-cs/Vehicle-License-Plate-System-by-FCN-CNN | [
"1bd29d73f43af8e973298c7a2aabb3ce082620fd"
] | [
"train_data.py"
] | [
"'''\r\nTensorFlow 1.3\r\nPython 3.6\r\nBy LiWenDi\r\n'''\r\nimport tensorflow as tf\r\nimport input_data\r\nimport os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nCHANNELS = 3 #色彩读取通道\r\nBATCH_SIZE = 10 #训练批次的数量\r\nCAPACITY = 500 #每次随机批次的总数量\r\nIMG_H = 40 #图像的高\r\nIMG_W = 20 #图像的宽\r\nINPUT_DATA = \"charSamples/\" #训练数据的根目录\r\nCACHE_DIR = 'D:/PythonCode/saved_model' #模型的储存目录\r\nLEARNING_RATE = 0.1 #学习率\r\nSTEPS = 5000 #训练次数\r\nMETHOD_NUM = 3\r\n#↑对图片调整大小的方法,其中:\r\n#0.双线性插值法\r\n#1.最近邻居法\r\n#2.双三次插值法\r\n#3.面积插值法\r\n\r\n\r\ntrain_batch, train_label_batch = input_data.get_batch(input_data.create_image_lists(INPUT_DATA, True), IMG_W, IMG_H, BATCH_SIZE, CAPACITY)\r\nx = tf.placeholder(tf.float32, [None, IMG_W * IMG_H])\r\nW = tf.Variable(tf.zeros([IMG_W * IMG_H, 34]), name = \"weights\" )\r\nb = tf.Variable(tf.zeros([34]), name = \"biases\" )\r\ny_without_softmax = tf.matmul(x, W) + b\r\ny = tf.nn.softmax(tf.matmul(x, W) + b)\r\ny_ = tf.placeholder(tf.float32, [None, 34])\r\ncross_entropy = tf.reduce_sum(tf.square(tf.subtract(y_,y)))\r\n#cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y, labels=tf.argmax(y_, 1)) #这是另一种计算交叉熵的函数\r\ntrain_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(cross_entropy)\r\n\r\n\r\n\r\nwith tf.Session() as sess:\r\n tf.global_variables_initializer().run()\r\n i = 0\r\n coord = tf.train.Coordinator()\r\n threads = tf.train.start_queue_runners(sess = sess, coord = coord)\r\n summer_op = tf.summary.merge_all()\r\n train_writer = tf.summary.FileWriter(CACHE_DIR, sess.graph)\r\n saver = tf.train.Saver()\r\n try:\r\n while not coord.should_stop() and i < STEPS:\r\n img, label = sess.run([train_batch, train_label_batch])\r\n\r\n train_step.run({x: np.reshape(img, [BATCH_SIZE, IMG_W * IMG_H]), y_: np.reshape(label, [BATCH_SIZE, 34])})\r\n \r\n if i % 100 == 0 :\r\n print(\"训练了\" + str(i) + \"次。\")\r\n summer_str = sess.run(summer_op)\r\n print(\"预测:\"+str(sess.run(tf.argmax(sess.run(y, feed_dict = {x: np.reshape(img, [BATCH_SIZE, IMG_H * IMG_W])}), 1))))\r\n print(\"标签:\"+str(sess.run(tf.argmax(np.reshape(label, [BATCH_SIZE, 34]), 1))))\r\n \r\n '''\r\n for each_img in np.reshape(img, [BATCH_SIZE, IMG_H , IMG_W, 1]):\r\n each_img = np.reshape(each_img, [IMG_H , IMG_W])\r\n plt.imshow(each_img)\r\n plt.show()\r\n #取消注释可以展示标签对应的图片\r\n '''\r\n\r\n if i == STEPS - 1:\r\n print(\"一切完成!\")\r\n checkpoint_path = os.path.join(CACHE_DIR, 'model.ckpt')\r\n saver_path = saver.save(sess, checkpoint_path)\r\n i += 1\r\n \r\n\r\n except tf.errors.OutOfRangeError:\r\n print(\"完成!\")\r\n finally:\r\n coord.request_stop()\r\n coord.join(threads)\r\n"
] | [
[
"tensorflow.matmul",
"tensorflow.summary.FileWriter",
"tensorflow.zeros",
"numpy.reshape",
"tensorflow.train.start_queue_runners",
"tensorflow.train.Coordinator",
"tensorflow.placeholder",
"tensorflow.subtract",
"tensorflow.global_variables_initializer",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.summary.merge_all",
"tensorflow.Session",
"tensorflow.train.Saver"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
benjaminhwilliams/screen19 | [
"6590aa3b59b488eef78fa9567c4cd0baf56ccaa8"
] | [
"screen19/screen.py"
] | [
"# coding: utf-8\n\nu\"\"\"\nProcess screening data obtained at Diamond Light Source Beamline I19.\n\nThis program presents the user with recommendations for adjustments to beam\nflux, based on a single-sweep screening data collection. It presents an\nupper- and lower-bound estimate of suitable flux.\n • The upper-bound estimate is based on a comparison of a histogram of\n measured pixel intensities with the trusted intensity range of the detector.\n The user is warned when the measured pixel intensities indicate that the\n detector would have a significant number of overloaded or untrustworthy\n pixels.\n • The lower-bound estimate is based on a linear fit of isotropic disorder\n parameter, B, to a Wilson plot of reflection intensities. From this,\n an estimate is made of the minimum exposure (flux × exposure time) required\n to achieve a target I/σ ratio (by default, target I/σ = 2) at one or more values\n of desired resolution, d, (by default, desired d = 1 Å, 0.84 Å, 0.6 Å & 0.4 Å).\n\nTarget I/σ and target d (in Ångström) can be set using the parameters\n'min_i_over_sigma' and 'desired_d'. One can set multiple values of the latter.\n\nBy default the disorder parameter fit is conducted on the\nintegrated data. This ought to provide a reasonably true fit, but requires\nan integration step, which can take some time. You can achieve a quicker,\ndirtier answer by fitting to the indexed data (i.e. only the stronger\nspots), using 'minimum_exposure.data=indexed'.\n\nExamples:\n\n screen19 imported_experiments.json\n\n screen19 *.cbf\n\n screen19 /path/to/data/\n\n screen19 /path/to/data/image0001.cbf:1:100\n\n screen19 min_i_over_sigma=2 desired_d=0.84 <imported_experiments.json | image_files>\n\n screen19 minimum_exposure.data=indexed <image_files>\n\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport json\nimport logging\nimport math\nimport os\nimport re\nimport sys\nimport time\nimport timeit\nfrom glob import glob\nfrom typing import Dict, List, Optional, Sequence, Tuple\n\nimport procrunner\nfrom six.moves.cPickle import PickleError\n\nimport iotbx.phil\nfrom libtbx import Auto\nfrom libtbx.introspection import number_of_processors\nfrom libtbx.phil import scope\n\nimport dials.command_line.integrate\nimport dials.util.version\nfrom dials.algorithms.indexing import DialsIndexError\nfrom dials.algorithms.indexing.bravais_settings import (\n refined_settings_from_refined_triclinic,\n)\nfrom dials.algorithms.shoebox import MaskCode\nfrom dials.array_family import flex\nfrom dials.command_line.dials_import import MetaDataUpdater\nfrom dials.command_line.index import index\nfrom dials.command_line.refine import run_dials_refine\nfrom dials.command_line.refine_bravais_settings import (\n bravais_lattice_to_space_group_table,\n eliminate_sys_absent,\n map_to_primitive,\n)\nfrom dials.util import Sorry, log, version\nfrom dials.util.ascii_art import spot_counts_per_image_plot\nfrom dials.util.options import OptionParser\nfrom dxtbx.model import ExperimentList\nfrom dxtbx.model.experiment_list import (\n BeamComparison,\n DetectorComparison,\n ExperimentListFactory,\n ExperimentListTemplateImporter,\n GoniometerComparison,\n)\n\nimport screen19\nfrom screen19.minimum_exposure import suggest_minimum_exposure\n\nTemplates = List[Tuple[str, Tuple[int, int]]]\n\nphil_scope = iotbx.phil.parse(\n u\"\"\"\n verbosity = 0\n .type = int(value_min=0)\n .caption = 'Verbosity level of log output'\n .help = \"Possible values:\\n\"\n \"\\t• 0: Info log output to 
stdout/logfile\\n\"\n \"\\t• 1: Info & debug log output to stdout/logfile\"\n\n output\n .caption = 'Options to control the output files'\n {\n log = 'screen19.log'\n .type = str\n .caption = \"The log filename\"\n }\n nproc = Auto\n .type = int\n .caption = 'Number of processors to use'\n .help = \"The chosen value will apply to all the DIALS utilities with a \"\n \"multi-processing option. If 'False' or 'Auto', all available \"\n \"processors will be used.\"\n\n minimum_exposure\n .caption = 'Options for screen19.minimum_exposure'\n {\n include scope screen19.minimum_exposure.phil_scope\n data = indexed *integrated\n .type = choice\n .caption = 'Choice of data for the displacement parameter fit'\n .help = 'For the lower-bound exposure estimate, choose whether to use '\n 'indexed (quicker) or integrated (better) data in fitting '\n 'the isotropic displacement parameter.'\n }\n\n maximum_flux\n .caption = 'Options for avoiding detector paralysation'\n {\n trusted_range_correction = 0.25\n .type = float(value_min=0, value_max=1)\n .caption = 'Factor by which to multiply the maximum trusted flux.'\n .help = \"The detector manufacturer's photon count correction, to correct \"\n \"for pixel paralysation, is often found to be unreliable at photon \"\n \"counts in the upper part of the nominal trusted range. In such \"\n \"cases, this factor can be used to adjust the upper limit of the \"\n \"trusted range. Pilatus detectors, for example, have been found \"\n \"not to give reliable correction for photon counts greater than \"\n \"0.25 × the manufacturer's trusted range. It is therefore \"\n \"sensible to present the user with a correspondingly reduced upper-\"\n \"limit flux recommendation.\"\n }\n\n dials_import\n .caption = 'Options for dials.import'\n {\n include scope dials.command_line.dials_import.phil_scope\n\n input\n {\n include scope dials.util.options.tolerance_phil_scope\n\n experiments = None\n .help = \"The experiment list file path\"\n .type = str\n .multiple = True\n .optional = True\n }\n }\n\n dials_find_spots\n .caption = 'Options for dials.find_spots'\n {\n include scope dials.command_line.find_spots.phil_scope\n }\n\n dials_index\n .caption = 'Options for dials.index'\n {\n include scope dials.command_line.index.phil_scope\n }\n\n dials_refine\n .caption = 'Options for dials.refine'\n {\n include scope dials.command_line.refine.phil_scope\n }\n\n dials_refine_bravais\n .caption = 'Options for dials.refine_bravais_settings'\n {\n include scope dials.command_line.refine_bravais_settings.phil_scope\n }\n\n dials_create_profile\n .caption = 'Options for dials.create_profile_model'\n {\n include scope dials.command_line.create_profile_model.phil_scope\n }\n\n dials_integrate\n .caption = 'Options for dials.integrate'\n {\n include scope dials.command_line.integrate.phil_scope\n }\n\n dials_report\n .caption = 'Options for dials.report'\n {\n include scope dials.command_line.report.phil_scope\n }\n \"\"\",\n process_includes=True,\n)\n\nprocrunner_debug = False\n\nlogger = logging.getLogger(\"dials.screen19\")\ndebug, info, warning = logger.debug, logger.info, logger.warning\n\n\ndef _run_integration(scope, experiments_file, reflections_file):\n # type: (scope, str, str) -> Tuple[ExperimentList, flex.reflection_table]\n \"\"\"Run integration programatically, compatible with multiple DIALS versions.\n\n Args:\n scope: The dials.integrate phil scope\n experiments_file: Path to the experiment list file\n reflections_file: Path to the reflection table file\n \"\"\"\n\n if 
hasattr(dials.command_line.integrate, \"run_integration\"):\n # DIALS 3.1+ interface\n expts, refls, _ = dials.command_line.integrate.run_integration(\n scope.extract(),\n ExperimentList.from_file(experiments_file),\n flex.reflection_table.from_file(reflections_file),\n )\n elif hasattr(dials.command_line.integrate, \"Script\"):\n # Pre-3.1-style programmatic interface\n expts, refls = dials.command_line.integrate.Script(phil=scope).run(\n [experiments_file, reflections_file]\n )\n else:\n raise RuntimeError(\n \"Could not find dials.integrate programmatic interface 'run_integration' or 'Script'\"\n )\n\n return expts, refls\n\n\ndef overloads_histogram(d_spacings, ticks=None, output=\"overloads\"):\n # type: (Sequence[float], Optional[Sequence[float]], Optional[str]) -> None\n \"\"\"\n Generate a histogram of reflection d-spacings as an image, default is .png.\n\n Args:\n d_spacings: d-spacings of the reflections.\n ticks (optional): d-values for the tick positions on the 1/d axis.\n output (optional): Output filename root, to which the extension `.png` will\n be appended. Default is `overloads`.\n \"\"\"\n import matplotlib\n\n matplotlib.use(\"Agg\")\n from matplotlib import pyplot as plt\n\n plt.xlabel(u\"d (Å) (inverse scale)\")\n plt.ylabel(u\"Number of overloaded reflections\")\n if ticks:\n plt.xticks([1 / d for d in ticks], [\"%g\" % d for d in ticks])\n plt.yscale(\"log\", nonposy=\"clip\")\n plt.hist(d_spacings, min(100, d_spacings.size()))\n plt.savefig(output)\n plt.close()\n\n\nclass Screen19(object):\n \"\"\"Encapsulates the screening script.\"\"\"\n\n def __init__(self):\n # Throughout the pipeline, retain the state of the processing.\n self.expts = ExperimentList([])\n self.refls = flex.reflection_table()\n # Get some default parameters. These must be extracted from the 'fetched'\n # PHIL scope, rather than the 'definition' phil scope returned by\n # iotbx.phil.parse. Confused? Blame PHIL.\n self.params = phil_scope.fetch(iotbx.phil.parse(\"\")).extract()\n\n def _quick_import(self, files): # type: (List[str]) -> bool\n \"\"\"\n Generate xia2-style templates from file names and attempt a quick import.\n\n From each given filename, generate a filename template by substituting a hash\n character (#) for each numeral in the last contiguous group of numerals\n before the file extension. For example, the filename `example_01_0001.cbf`\n becomes `example_01_####.cbf`.\n\n Contiguous image ranges are recorded by associating the start and end image\n number of the range with the relevant filename template.\n\n dials.import is then run with options to extrapolate header information from\n the first image file, thereby running more quickly than reading each image\n header individually.\n\n Args:\n files: List of image filenames.\n\n Returns:\n Boolean flag indicating whether the quick import has succeeded.\n \"\"\"\n if len(files) == 1:\n # No point in quick-importing a single file\n return False\n debug(\"Attempting quick import...\")\n files.sort()\n templates = {} # type: Dict[str, List[Optional[List[int]]]]\n for f in files:\n template, image = screen19.make_template(f)\n if template not in templates:\n image_range = [image, image] if image else []\n templates.update({template: [image_range]})\n elif image == templates[template][-1][-1] + 1:\n templates[template][-1][-1] = image\n elif image == templates[template][-1][-1]:\n # We have a duplicate input file name. 
Do nothing.\n pass\n else:\n templates[template].append([image, image])\n # Return tuple of template and image range for each unique image range\n templates = [\n (t, tuple(r)) for t, ranges in templates.items() for r in ranges\n ] # type: Templates\n return self._quick_import_templates(templates)\n\n def _quick_import_templates(self, templates): # type: (Templates) -> bool\n \"\"\"\n Take image file templates and frame number ranges and try to run dials.import.\n\n dials.import is run with options to extrapolate header information from\n the first image file, thereby running more quickly than reading each image\n header individually.\n\n Args:\n templates: A list of tuples, each tuple containing a xia2-style filename\n template and the start and end image numbers of the associated\n sweep.\n\n Returns:\n Boolean flag indicating whether the quick import has succeeded.\n \"\"\"\n debug(\"Quick import template summary:\\n\\t%s\", templates)\n if len(templates) > 1:\n debug(\"Cannot currently run quick import on multiple templates.\")\n return False\n\n try:\n scan_range = templates[0][1] # type: Tuple[int, int]\n if not scan_range:\n raise IndexError\n except IndexError:\n debug(\"Cannot run quick import: could not determine image naming template.\")\n return False\n\n info(\"Running quick import.\")\n self.params.dials_import.input.template = [templates[0][0]]\n self.params.dials_import.geometry.scan.image_range = scan_range\n self.params.dials_import.geometry.scan.extrapolate_scan = True\n self._run_dials_import()\n\n return True\n\n def _import(self, files): # type: (List[str]) -> None\n \"\"\"\n Try to run a quick call of dials.import. Failing that, run a slow call.\n\n Try initially to construct file name templates contiguous groups of files.\n Failing that, pass a full list of the files to the importer (slower).\n\n Args:\n files: List of image filenames.\n \"\"\"\n info(\"\\nImporting data...\")\n if len(files) == 1:\n if os.path.isdir(files[0]):\n debug(\n \"You specified a directory. Importing all CBF files in \"\n \"that directory.\"\n )\n # TODO Support HDF5.\n files = [\n os.path.join(files[0], f)\n for f in os.listdir(files[0])\n if f.endswith(\".cbf\")\n or f.endswith(\".cbf.gz\")\n or f.endswith(\".cbf.bz2\")\n ]\n elif len(files[0].split(\":\")) == 3:\n debug(\n \"You specified an image range in the xia2 format. \"\n \"Importing all specified files.\"\n )\n template, start, end = files[0].split(\":\")\n template = screen19.make_template(template)[0]\n start, end = int(start), int(end)\n if not self._quick_import_templates([(template, (start, end))]):\n warning(\"Could not import specified image range.\")\n sys.exit(1)\n info(\"Quick import successful.\")\n return\n elif files[0].endswith(\".expt\"):\n debug(\n \"You specified an existing experiment list file. 
\"\n \"No import necessary.\"\n )\n try:\n self.expts = ExperimentList.from_file(files[0])\n except (IOError, PickleError, ValueError):\n pass\n else:\n self.params.dials_import.output.experiments = files[0]\n if self.expts:\n return\n\n if not files:\n warning(\"No images found matching input.\")\n sys.exit(1)\n\n # Can the files be quick-imported?\n if self._quick_import(files):\n info(\"Quick import successful.\")\n return\n\n self.params.dials_import.input.experiments = files\n self._run_dials_import()\n\n def _run_dials_import(self):\n \"\"\"\n Perform a minimal version of dials.import to get an experiment list.\n\n Use some filleted bits of dials.import and dials.util.options.Importer.\n \"\"\"\n # Get some key data format arguments.\n try:\n format_kwargs = {\n \"dynamic_shadowing\": self.params.dials_import.format.dynamic_shadowing,\n \"multi_panel\": self.params.dials_import.format.multi_panel,\n }\n except AttributeError:\n format_kwargs = {}\n\n # If filenames contain wildcards, expand\n args = []\n for arg in self.params.dials_import.input.experiments:\n if \"*\" in arg:\n args.extend(glob(arg))\n else:\n args.append(arg)\n\n if args:\n # Are compare{beam,detector,goniometer} and scan_tolerance necessary?\n # They are cargo-culted from the DIALS option parser.\n tol_params = self.params.dials_import.input.tolerance\n compare_beam = BeamComparison(\n wavelength_tolerance=tol_params.beam.wavelength,\n direction_tolerance=tol_params.beam.direction,\n polarization_normal_tolerance=tol_params.beam.polarization_normal,\n polarization_fraction_tolerance=tol_params.beam.polarization_fraction,\n )\n compare_detector = DetectorComparison(\n fast_axis_tolerance=tol_params.detector.fast_axis,\n slow_axis_tolerance=tol_params.detector.slow_axis,\n origin_tolerance=tol_params.detector.origin,\n )\n compare_goniometer = GoniometerComparison(\n rotation_axis_tolerance=tol_params.goniometer.rotation_axis,\n fixed_rotation_tolerance=tol_params.goniometer.fixed_rotation,\n setting_rotation_tolerance=tol_params.goniometer.setting_rotation,\n )\n scan_tolerance = tol_params.scan.oscillation\n\n # Import an experiment list from image data.\n try:\n experiments = ExperimentListFactory.from_filenames(\n args,\n compare_beam=compare_beam,\n compare_detector=compare_detector,\n compare_goniometer=compare_goniometer,\n scan_tolerance=scan_tolerance,\n format_kwargs=format_kwargs,\n )\n except IOError as e:\n warning(\"%s '%s'\", e.strerror, e.filename)\n sys.exit(1)\n\n # Record the imported experiments for use elsewhere.\n # Quit if there aren't any.\n self.expts.extend(experiments)\n if not self.expts:\n warning(\"No images found.\")\n sys.exit(1)\n\n else:\n # Use the template importer.\n if len(self.params.dials_import.input.template) > 0:\n importer = ExperimentListTemplateImporter(\n self.params.dials_import.input.template, format_kwargs=format_kwargs\n )\n # Record the imported experiments for use elsewhere.\n # Quit if there aren't any.\n self.expts.extend(importer.experiments)\n if not self.expts:\n warning(\n \"No images found matching template %s\"\n % self.params.dials_import.input.template[0]\n )\n sys.exit(1)\n\n # Setup the metadata updater\n metadata_updater = MetaDataUpdater(self.params.dials_import)\n\n # Extract the experiments and loop through\n self.expts = metadata_updater(self.expts.imagesets())\n\n def _count_processors(self, nproc=None): # type: (Optional[int]) -> None\n \"\"\"\n Determine the number of processors and save it as an instance variable.\n\n The user may specify the 
number of processors to use. If no value is\n given, the number of available processors is returned.\n\n Args:\n nproc (optional): Number of processors.\n \"\"\"\n if nproc and nproc is not Auto:\n self.nproc = nproc\n return\n\n # if environmental variable NSLOTS is set to a number then use that\n try:\n self.nproc = int(os.environ.get(\"NSLOTS\"))\n return\n except (ValueError, TypeError):\n pass\n\n self.nproc = number_of_processors(return_value_if_unknown=-1)\n\n if self.nproc <= 0:\n warning(\n \"Could not determine number of available processors. Error code %d\",\n self.nproc,\n )\n sys.exit(1)\n\n def _count_images(self): # type: () -> int\n \"\"\"\n Attempt to determine the number of diffraction images.\n\n The number of diffraction images is determined from the imported_experiments\n JSON file.\n\n Returns:\n Number of images.\n \"\"\"\n # FIXME: This exception handling should be redundant. Empty experiment\n # lists should get caught at the import stage. Is this so?\n try:\n return self.expts[0].imageset.size()\n except IndexError:\n warning(\"Could not determine number of images in dataset.\")\n sys.exit(1)\n\n def _check_intensities(self, mosaicity_correction=True): # type: (bool) -> None\n \"\"\"\n Run xia2.overload and plot a histogram of pixel intensities.\n\n If `mosaicity_correction` is true, the pixel intensities are approximately\n adjusted to take account of a systematic defect in the detector count rate\n correction. See https://github.com/xia2/screen19/wiki#mosaicity-correction\n\n Args:\n mosaicity_correction (optional): default is `True`.\n \"\"\"\n info(\"\\nTesting pixel intensities...\")\n command = [\"xia2.overload\", \"nproc=%s\" % self.nproc, \"indexed.expt\"]\n debug(\"running %s\", command)\n result = procrunner.run(command, print_stdout=False, debug=procrunner_debug)\n debug(\"result = %s\", screen19.prettyprint_dictionary(result))\n info(\"Successfully completed (%.1f sec)\", result[\"runtime\"])\n\n if result[\"exitcode\"] != 0:\n warning(\"Failed with exit code %d\", result[\"exitcode\"])\n sys.exit(1)\n\n with open(\"overload.json\") as fh:\n overload_data = json.load(fh)\n\n info(\"Pixel intensity distribution:\")\n count_sum = 0\n hist = {}\n if \"bins\" in overload_data:\n for b in range(overload_data[\"bin_count\"]):\n if overload_data[\"bins\"][b] > 0:\n hist[b] = overload_data[\"bins\"][b]\n count_sum += b * overload_data[\"bins\"][b]\n else:\n hist = {int(k): v for k, v in overload_data[\"counts\"].items() if int(k) > 0}\n count_sum = sum([k * v for k, v in hist.items()])\n\n average_to_peak = 1\n if mosaicity_correction:\n # Adjust for the detector count rate correction\n if self._sigma_m:\n delta_z = self._oscillation / self._sigma_m / math.sqrt(2)\n average_to_peak = (\n math.sqrt(math.pi) * delta_z * math.erf(delta_z)\n + math.exp(-(delta_z ** 2))\n - 1\n ) / delta_z ** 2\n info(\"Average-to-peak intensity ratio: %f\", average_to_peak)\n\n scale = 100 * overload_data[\"scale_factor\"] / average_to_peak\n info(\"Determined scale factor for intensities as %f\", scale)\n\n debug(\n \"intensity histogram: { %s }\",\n \", \".join([\"%d:%d\" % (k, hist[k]) for k in sorted(hist)]),\n )\n max_count = max(hist.keys())\n hist_max = max_count * scale\n hist_granularity, hist_format = 1, \"%.0f\"\n if hist_max < 50:\n hist_granularity, hist_format = 2, \"%.1f\"\n if hist_max < 15:\n hist_granularity, hist_format = 10, \"%.1f\"\n rescaled_hist = {}\n for x in hist.keys():\n rescaled = round(x * scale * hist_granularity)\n if rescaled > 0:\n 
rescaled_hist[rescaled] = hist[x] + rescaled_hist.get(rescaled, 0)\n hist = rescaled_hist\n debug(\n \"rescaled histogram: { %s }\",\n \", \".join(\n [\n (hist_format + \":%d\") % (k / hist_granularity, hist[k])\n for k in sorted(hist)\n ]\n ),\n )\n\n screen19.plot_intensities(\n hist, 1 / hist_granularity, procrunner_debug=procrunner_debug\n )\n\n linear_response_limit = 100 * self.params.maximum_flux.trusted_range_correction\n marginal_limit = max(70, linear_response_limit)\n\n text = \"\".join(\n (\n \"Strongest pixel (%d counts) \" % max_count,\n \"reaches %.1f%% \" % hist_max,\n \"of the detector count rate limit\",\n )\n )\n if hist_max > 100:\n warning(\"Warning: %s!\", text)\n else:\n info(text)\n if (\n \"overload_limit\" in overload_data\n and max_count >= overload_data[\"overload_limit\"]\n ):\n warning(\n \"Warning: THE DATA CONTAIN REGULAR OVERLOADS!\\n\"\n \" The photon incidence rate is outside the specified \"\n \"limits of the detector.\\n\"\n \" The built-in detector count rate correction cannot \"\n \"adjust for this.\\n\"\n \" You should aim for count rates below {:.0%} of the \"\n \"detector limit.\".format(\n self.params.maximum_flux.trusted_range_correction\n )\n )\n elif hist_max > marginal_limit:\n warning(\n \"Warning: The photon incidence rate is well outside the \"\n \"linear response region of the detector (<{:.0%}).\\n\"\n \" The built-in detector count rate correction may not be \"\n \"able to adjust for this.\".format(\n self.params.maximum_flux.trusted_range_correction\n )\n )\n elif hist_max > linear_response_limit:\n info(\n \"The photon incidence rate is outside the linear response \"\n \"region of the detector (<{:.0%}).\\n\"\n \" The built-in detector count rate correction may be able \"\n \"to adjust for this.\".format(\n self.params.maximum_flux.trusted_range_correction\n )\n )\n if not mosaicity_correction:\n warning(\n \"Warning: Not enough data for proper profile estimation.\"\n \" The spot intensities are not corrected for mosaicity.\\n\"\n \" The true photon incidence rate will be higher than the \"\n \"given estimate.\"\n )\n\n info(\"Total sum of counts in dataset: %d\", count_sum)\n\n def _find_spots(self, args=None): # type: (Optional[List[str]]) -> None\n \"\"\"\n Call `dials.find_spots` on the imported experiment list.\n\n Args:\n args (optional): List of any additional PHIL parameters to be used by\n dials.import.\n \"\"\"\n info(\"\\nFinding spots...\")\n\n dials_start = timeit.default_timer()\n\n # Use some choice fillets from dials.find_spots\n # Ignore `args`, use `self.params`\n\n # Loop through all the imagesets and find the strong spots\n\n self.refls = flex.reflection_table.from_observations(\n self.expts, self.params.dials_find_spots\n )\n\n # Add n_signal column - before deleting shoeboxes\n\n good = MaskCode.Foreground | MaskCode.Valid\n self.refls[\"n_signal\"] = self.refls[\"shoebox\"].count_mask_values(good)\n\n # Delete the shoeboxes\n if not self.params.dials_find_spots.output.shoeboxes:\n del self.refls[\"shoebox\"]\n\n info(\n 60 * \"-\" + \"\\n%s\\n\" + 60 * \"-\" + \"\\nSuccessfully completed (%.1f sec)\",\n spot_counts_per_image_plot(self.refls),\n timeit.default_timer() - dials_start,\n )\n\n def _index(self): # type: () -> bool\n \"\"\"\n Call `dials.index` on the output of spot finding.\n\n Returns:\n Boolean value indicating whether indexing was successful.\n \"\"\"\n dials_start = timeit.default_timer()\n\n # Prepare max_cell constraint strategies.\n max_cell = self.params.dials_index.indexing.max_cell\n # By 
default, try unconstrained max_cell followed by max_cell=20.\n # If the user has already specified a max_cell < 20, do not relax to 20Å.\n cell_constraints = [([], max_cell)]\n if not max_cell or max_cell is Auto or max_cell > 20:\n cell_constraints += [([\"max_cell constraint\"], 20)]\n\n # Prepare indexing methods, preferring the real_space_grid_search if a\n # known unit cell has been specified, otherwise using 3D FFT, then 1D FFT.\n methods = (\n [([\"real space grid search\"], \"real_space_grid_search\")]\n if self.params.dials_index.indexing.known_symmetry.unit_cell\n else []\n )\n methods += [([\"3D FFT\"], \"fft3d\"), ([\"1D FFT\"], \"fft1d\")]\n\n # Cycle through the indexing methods for each of the max_cell constraint\n # strategies until an indexing solution is found.\n for i, (max_cell_msg, max_cell) in enumerate(cell_constraints):\n # Set the max_cell constraint strategy.\n self.params.dials_index.indexing.max_cell = max_cell\n for j, (method_msg, method) in enumerate(methods):\n # Set the indexing method.\n self.params.dials_index.indexing.method = method\n # Log a handy message to the user.\n msg = (\n \"Retrying with \" + \" and \".join(method_msg + max_cell_msg)\n if i + j\n else \"Indexing\"\n )\n info(\"\\n%s...\", msg)\n try:\n # If indexing is successful, break out of the inner loop.\n self.expts, self.refls = index(\n self.expts, [self.refls], self.params.dials_index\n )\n break\n except (DialsIndexError, ValueError) as e:\n # If indexing is unsuccessful, try again with the next\n # strategy.\n warning(\"Failed: %s\", str(e))\n continue\n else:\n # When all the indexing methods are unsuccessful, move onto\n # the next max_cell constraint strategy and try again.\n continue\n # We should only get here if successfully indexed. Break out of the loop\n break\n else:\n # Indexing completely unsuccessful.\n return False\n\n sg_type = self.expts[0].crystal.get_crystal_symmetry().space_group().type()\n symb = sg_type.universal_hermann_mauguin_symbol()\n unit_cell = self.expts[0].crystal.get_unit_cell()\n\n self.refls.as_file(self.params.dials_index.output.reflections)\n self.expts.as_file(self.params.dials_index.output.experiments)\n self.refls.as_file(self.params.dials_index.output.reflections)\n info(\n \"Found primitive solution: %s %s using %s reflections\\n\"\n \"Indexed experiments and reflections saved as %s, %s\\n\"\n \"Successfully completed (%.1f sec)\",\n symb,\n unit_cell,\n self.refls[\"id\"].count(0),\n self.params.dials_index.output.experiments,\n self.params.dials_index.output.reflections,\n timeit.default_timer() - dials_start,\n )\n\n # Report the indexing successful.\n return True\n\n def _wilson_calculation(self): # type: () -> None\n \"\"\"\n Run `screen19.minimum_exposure` on an experiment list and reflection table.\n\n For best results, the reflections and experiment list should contain the\n results of integration or scaling. 
If only strong spots are used, the Wilson\n plot fit may be poor.\n \"\"\"\n dials_start = timeit.default_timer()\n info(\"\\nEstimating lower exposure bound...\")\n\n suggest_minimum_exposure(self.expts, self.refls, self.params.minimum_exposure)\n\n info(\"Successfully completed (%.1f sec)\", timeit.default_timer() - dials_start)\n\n def _refine(self): # type: () -> None\n \"\"\"\n Run `dials.refine` on the results of indexing.\n \"\"\"\n dials_start = timeit.default_timer()\n info(\"\\nRefining...\")\n\n try:\n self.expts, self.refls, _, _ = run_dials_refine(\n self.expts, self.refls, self.params.dials_refine\n )\n except Sorry as e:\n warning(\"dials.refine failed: %d\\nGiving up.\\n\", e)\n sys.exit(1)\n\n info(\"Successfully refined (%.1f sec)\", timeit.default_timer() - dials_start)\n\n def _create_profile_model(self): # type: () -> bool\n \"\"\"\n Run `dials.create_profile_model` on indexed reflections.\n\n The indexed experiment list will be overwritten with a copy that includes\n the profile model but is otherwise identical.\n\n Returns:\n Boolean value indicating whether it was possible to determine a profile\n model from the data.\n \"\"\"\n info(\"\\nCreating profile model...\")\n command = [\n \"dials.create_profile_model\",\n self.params.dials_index.output.experiments,\n self.params.dials_index.output.reflections,\n \"output = %s\" % self.params.dials_index.output.experiments,\n ]\n result = procrunner.run(command, print_stdout=False, debug=procrunner_debug)\n debug(\"result = %s\", screen19.prettyprint_dictionary(result))\n self._sigma_m = None\n if result[\"exitcode\"] == 0:\n db = ExperimentList.from_file(self.params.dials_index.output.experiments)[0]\n self._oscillation = db.imageset.get_scan().get_oscillation()[1]\n self._sigma_m = db.profile.sigma_m()\n info(\n u\"%d images, %s° oscillation, σ_m=%.3f°\",\n db.imageset.get_scan().get_num_images(),\n str(self._oscillation),\n self._sigma_m,\n )\n info(\"Successfully completed (%.1f sec)\", result[\"runtime\"])\n return True\n warning(\"Failed with exit code %d\", result[\"exitcode\"])\n return False\n\n def _integrate(self): # type: () -> None\n \"\"\"Run `dials.integrate` to integrate reflection intensities.\"\"\"\n dials_start = timeit.default_timer()\n info(\"\\nIntegrating...\")\n\n # Don't waste time recreating the profile model\n self.params.dials_integrate.create_profile_model = False\n # Get the dials.integrate PHIL scope, populated with parsed input parameters\n integrate_scope = phil_scope.get(\"dials_integrate\").objects[0]\n integrate_scope.name = \"\"\n integrate_scope = integrate_scope.format(self.params.dials_integrate)\n\n try:\n integrated_experiments, integrated_reflections = _run_integration(\n integrate_scope,\n self.params.dials_index.output.experiments,\n self.params.dials_index.output.reflections,\n )\n # Save the output to files\n integrated_reflections.as_file(\n self.params.dials_integrate.output.reflections\n )\n integrated_experiments.as_file(\n self.params.dials_integrate.output.experiments\n )\n # ... 
and also store the output internally\n self.expts, self.refls = integrated_experiments, integrated_reflections\n info(\n \"Successfully completed (%.1f sec)\",\n timeit.default_timer() - dials_start,\n )\n except SystemExit as e:\n if e.code:\n warning(\"dials.integrate failed with exit code %d\\nGiving up.\", e.code)\n sys.exit(1)\n\n # This is a hacky check but should work for as long as DIALS 2.0 is supported.\n if version.dials_version() < \"DIALS 2.1\":\n\n def _refine_bravais(self, experiments, reflections):\n # type: (ExperimentList, flex.reflection_table) -> None\n \"\"\"\n Run `dials.refine_bravais_settings` on an experiments and reflections.\n\n Args:\n experiments: An experiment list..\n reflections: The corresponding reflection table.\n \"\"\"\n info(\"\\nRefining Bravais settings...\")\n command = [\"dials.refine_bravais_settings\", experiments, reflections]\n result = procrunner.run(command, print_stdout=False, debug=procrunner_debug)\n debug(\"result = %s\", screen19.prettyprint_dictionary(result))\n if result[\"exitcode\"] == 0:\n m = re.search(\n r\"[-+]{3,}\\n[^\\n]*\\n[-+|]{3,}\\n(.*\\n)*[-+]{3,}\",\n result[\"stdout\"].decode(\"utf-8\"),\n )\n if m:\n info(m.group(0))\n else:\n info(\n \"Could not interpret dials.refine_bravais_settings output, \"\n \"please check dials.refine_bravais_settings.log\"\n )\n info(\"Successfully completed (%.1f sec)\", result[\"runtime\"])\n else:\n warning(\"Failed with exit code %d\", result[\"exitcode\"])\n sys.exit(1)\n\n else:\n\n def _refine_bravais(self): # type: () -> None\n \"\"\"Run `dials.refine_bravais_settings` to determine the space group.\"\"\"\n dials_start = timeit.default_timer()\n info(\"\\nRefining Bravais settings...\")\n\n self.refls = eliminate_sys_absent(self.expts, self.refls)\n map_to_primitive(self.expts, self.refls)\n\n try:\n refined_settings = refined_settings_from_refined_triclinic(\n self.expts, self.refls, self.params.dials_refine_bravais\n )\n except RuntimeError as e:\n warning(\"dials.refine_bravais_settings failed.\\nGiving up.\")\n sys.exit(e)\n\n possible_bravais_settings = {\n solution[\"bravais\"] for solution in refined_settings\n }\n bravais_lattice_to_space_group_table(possible_bravais_settings)\n try:\n # Old version of dials with as_str() method\n logger.info(refined_settings.as_str())\n except AttributeError:\n # Newer versions of dials (>= 2.2.2) has proper __str__ method\n logger.info(refined_settings)\n\n info(\n \"Successfully completed (%.1f sec)\",\n timeit.default_timer() - dials_start,\n )\n\n def _report(self, experiments, reflections):\n # type: (ExperimentList, flex.reflection_table) -> None\n \"\"\"\n Run `dials.report` on an experiment list and reflection table.\n\n Args:\n experiments: An experiment list.\n reflections: The corresponding reflection table.\n \"\"\"\n info(\"\\nCreating report...\")\n command = [\"dials.report\", experiments, reflections]\n result = procrunner.run(command, print_stdout=False, debug=procrunner_debug)\n debug(\"result = %s\", screen19.prettyprint_dictionary(result))\n if result[\"exitcode\"] == 0:\n info(\"Successfully completed (%.1f sec)\", result[\"runtime\"])\n # if sys.stdout.isatty():\n # info(\"Trying to start browser\")\n # try:\n # import subprocess\n # d = dict(os.environ)\n # d[\"LD_LIBRARY_PATH\"] = \"\"\n # subprocess.Popen([\"xdg-open\", \"dials-report.html\"], env=d)\n # except Exception as e:\n # debug(\"Could not open browser\\n%s\", str(e))\n else:\n warning(\"Failed with exit code %d\", result[\"exitcode\"])\n sys.exit(1)\n\n def 
run(self, args=None, phil=phil_scope, set_up_logging=False):\n # type: (Optional[List[str]], scope, bool) -> None\n \"\"\"\n TODO: Docstring.\n\n Args:\n args:\n phil:\n set_up_logging:\n\n Returns:\n\n \"\"\"\n usage = \"%prog [options] image_directory | image_files.cbf | imported.expt\"\n\n parser = OptionParser(\n usage=usage, epilog=__doc__, phil=phil, check_format=False\n )\n\n self.params, options, unhandled = parser.parse_args(\n args=args, show_diff_phil=True, return_unhandled=True, quick_parse=True\n )\n\n version_information = \"screen19 v%s using %s (%s)\" % (\n screen19.__version__,\n dials.util.version.dials_version(),\n time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n )\n\n start = timeit.default_timer()\n\n if len(unhandled) == 0:\n print(__doc__)\n print(version_information)\n return\n\n if set_up_logging:\n # Configure the logging\n log.config(verbosity=self.params.verbosity, logfile=self.params.output.log)\n # Unless verbose output has been requested, suppress generation of\n # debug and info log records from any child DIALS command, retaining\n # those from screen19 itself.\n if not self.params.verbosity:\n logging.getLogger(\"dials\").setLevel(logging.WARNING)\n logging.getLogger(\"dials.screen19\").setLevel(logging.INFO)\n\n info(version_information)\n debug(\"Run with:\\n%s\\n%s\", \" \".join(unhandled), parser.diff_phil.as_str())\n\n self._count_processors(nproc=self.params.nproc)\n debug(\"Using %s processors\", self.nproc)\n # Set multiprocessing settings for spot-finding, indexing and\n # integration to match the top-level specified number of processors\n self.params.dials_find_spots.spotfinder.mp.nproc = self.nproc\n self.params.dials_index.indexing.nproc = self.nproc\n # Setting self.params.dials_refine.refinement.mp.nproc is not helpful\n self.params.dials_integrate.integration.mp.nproc = self.nproc\n\n # Set the input and output parameters for the DIALS components\n # TODO: Compare to diff_phil and start from later in the pipeline if\n # appropriate\n self._import(unhandled)\n imported_name = self.params.dials_import.output.experiments\n\n self._find_spots()\n\n if not self._index():\n info(\"\\nRetrying for stronger spots only...\")\n strong_refls = self.refls\n self.params.dials_find_spots.spotfinder.threshold.dispersion.sigma_strong = (\n 15\n )\n self._find_spots()\n\n if not self._index():\n warning(\"Giving up.\")\n self.expts.as_file(imported_name)\n strong_refls.as_file(\"strong.refl\")\n self.refls.as_file(\"stronger.refl\")\n info(\n \"Could not find an indexing solution. You may want to \"\n \"have a look at the reciprocal space by running:\\n\\n\"\n \" dials.reciprocal_lattice_viewer %s %s\\n\\n\"\n \"or, to only include stronger spots:\\n\\n\"\n \" dials.reciprocal_lattice_viewer %s %s\\n\",\n imported_name,\n \"strong.refl\",\n imported_name,\n \"stronger.refl\",\n )\n sys.exit(1)\n\n if not self._create_profile_model():\n info(\"\\nRefining model to attempt to increase number of valid spots...\")\n self._refine()\n if not self._create_profile_model():\n warning(\"Giving up.\")\n info(\n \"The identified indexing solution may not be correct. 
\"\n \"You may want to have a look at the reciprocal space by \"\n \"running:\\n\\n\"\n \" dials.reciprocal_lattice_viewer indexed.expt indexed.refl\\n\"\n )\n sys.exit(1)\n\n self._check_intensities()\n\n if self.params.minimum_exposure.data == \"integrated\":\n self._integrate()\n\n self._wilson_calculation()\n\n experiments = self.params.dials_integrate.output.experiments\n reflections = self.params.dials_integrate.output.reflections\n else:\n self._wilson_calculation()\n\n experiments = self.params.dials_create_profile.output\n reflections = self.params.dials_index.output.reflections\n\n # This is a hacky check but should work for as long as DIALS 2.0 is supported.\n if version.dials_version() < \"DIALS 2.1\":\n self._refine_bravais(experiments, reflections)\n else:\n self._refine_bravais()\n\n self._report(experiments, reflections)\n\n runtime = timeit.default_timer() - start\n debug(\n \"Finished at %s, total runtime: %.1f\",\n time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n runtime,\n )\n info(\"screen19 successfully completed (%.1f sec).\", runtime)\n\n\ndef main(): # type: () -> None\n \"\"\"Dispatcher for command-line call.\"\"\"\n Screen19().run(set_up_logging=True)\n"
] | [
[
"matplotlib.use",
"matplotlib.pyplot.yscale",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sobotka/colour | [
"aa3fe95fba83ffc0f3ce1eb6aca85e6d8f3bde51"
] | [
"colour/colorimetry/dataset/illuminants/hunterlab.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\nHunterLab Dataset\n=================\n\nDefines the *HunterLab* illuminants dataset for the\n*CIE 1931 2 Degree Standard Observer* and\n*CIE 1964 10 Degree Standard Observer*.\n\nThe currently implemented data has been extracted from :cite:`HunterLab2008b`,\nhowever you may want to use different data according to the tables given in\n:cite:`HunterLab2008c`.\n\nSee Also\n--------\n`Illuminants Jupyter Notebook\n<http://nbviewer.jupyter.org/github/colour-science/colour-notebooks/\\\nblob/master/notebooks/colorimetry/illuminants.ipynb>`_\n\nReferences\n----------\n- :cite:`HunterLab2008b` : HunterLab. (2008). Hunter L,a,b Color Scale.\n Retrieved from http://www.hunterlab.se/wp-content/uploads/2012/11/\\\nHunter-L-a-b.pdf\n- :cite:`HunterLab2008c` : HunterLab. (2008). Illuminant Factors in Universal\n Software and EasyMatch Coatings. Retrieved from\n https://support.hunterlab.com/hc/en-us/article_attachments/201437785/\\\nan02_02.pdf\n\"\"\"\n\nfrom __future__ import division, unicode_literals\n\nimport numpy as np\nfrom collections import namedtuple\n\nfrom colour.utilities import CaseInsensitiveMapping\n\n__author__ = 'Colour Developers'\n__copyright__ = 'Copyright (C) 2013-2019 - Colour Developers'\n__license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause'\n__maintainer__ = 'Colour Developers'\n__email__ = '[email protected]'\n__status__ = 'Production'\n\n__all__ = [\n 'HunterLab_Illuminant_Specification',\n 'HUNTERLAB_ILLUMINANTS_CIE_1931_2_DEGREE_STANDARD_OBSERVER_DATA',\n 'HUNTERLAB_ILLUMINANTS_CIE_1931_2_DEGREE_STANDARD_OBSERVER',\n 'HUNTERLAB_ILLUMINANTS_CIE_1964_10_DEGREE_STANDARD_OBSERVER_DATA',\n 'HUNTERLAB_ILLUMINANTS_CIE_1964_10_DEGREE_STANDARD_OBSERVER',\n 'HUNTERLAB_ILLUMINANTS'\n]\n\nHunterLab_Illuminant_Specification = namedtuple(\n 'HunterLab_Illuminant_Specification', ('name', 'XYZ_n', 'K_ab'))\n\n# yapf: disable\nHUNTERLAB_ILLUMINANTS_CIE_1931_2_DEGREE_STANDARD_OBSERVER_DATA = (\n ('A', np.array([109.83, 100.00, 35.55]), np.array([185.20, 38.40])),\n ('C', np.array([98.04, 100.00, 118.11]), np.array([175.00, 70.00])),\n ('D50', np.array([96.38, 100.00, 82.45]), np.array([173.51, 58.48])),\n ('D60', np.array([95.23, 100.00, 100.86]), np.array([172.47, 64.72])),\n ('D65', np.array([95.02, 100.00, 108.82]), np.array([172.30, 67.20])),\n ('D75', np.array([94.96, 100.00, 122.53]), np.array([172.22, 71.30])),\n ('FL2', np.array([98.09, 100.00, 67.53]), np.array([175.00, 52.90])),\n ('TL 4', np.array([101.40, 100.00, 65.90]), np.array([178.00, 52.30])),\n ('UL 3000', np.array([107.99, 100.00, 33.91]), np.array([183.70, 37.50])))\n# yapf: enable\n\nHUNTERLAB_ILLUMINANTS_CIE_1931_2_DEGREE_STANDARD_OBSERVER = (\n CaseInsensitiveMapping({\n x[0]: HunterLab_Illuminant_Specification(*x)\n for x in HUNTERLAB_ILLUMINANTS_CIE_1931_2_DEGREE_STANDARD_OBSERVER_DATA\n }))\n\"\"\"\n*Hunter L,a,b* illuminant dataset for *CIE 1931 2 Degree Standard Observer*.\n\nReferences\n----------\n:cite:`HunterLab2008b`, :cite:`HunterLab2008c`\n\nHUNTERLAB_ILLUMINANTS_CIE_1931_2_DEGREE_STANDARD_OBSERVER :\n CaseInsensitiveMapping\n\"\"\"\n\n# yapf: disable\nHUNTERLAB_ILLUMINANTS_CIE_1964_10_DEGREE_STANDARD_OBSERVER_DATA = (\n ('A', np.array([111.16, 100.00, 35.19]), np.array([186.30, 38.20])),\n ('C', np.array([97.30, 100.00, 116.14]), np.array([174.30, 69.40])),\n ('D50', np.array([96.72, 100.00, 81.45]), np.array([173.82, 58.13])),\n ('D60', np.array([95.21, 100.00, 99.60]), np.array([172.45, 64.28])),\n ('D65', np.array([94.83, 100.00, 107.38]), 
np.array([172.10, 66.70])),\n ('D75', np.array([94.45, 100.00, 120.70]), np.array([171.76, 70.76])),\n ('FL2', np.array([102.13, 100.00, 69.37]), np.array([178.60, 53.60])),\n ('TL 4', np.array([103.82, 100.00, 66.90]), np.array([180.10, 52.70])),\n ('UL 3000', np.array([111.12, 100.00, 35.21]), np.array([186.30, 38.20])))\n# yapf: enable\n\nHUNTERLAB_ILLUMINANTS_CIE_1964_10_DEGREE_STANDARD_OBSERVER = (\n CaseInsensitiveMapping({\n x[0]: HunterLab_Illuminant_Specification(*x)\n for x in\n HUNTERLAB_ILLUMINANTS_CIE_1964_10_DEGREE_STANDARD_OBSERVER_DATA\n }))\n\"\"\"\n*Hunter L,a,b* illuminant dataset for *CIE 1964 10 Degree Standard Observer*.\n\nReferences\n----------\n:cite:`HunterLab2008b`, :cite:`HunterLab2008c`\n\nHUNTERLAB_ILLUMINANTS_CIE_1964_10_DEGREE_STANDARD_OBSERVER :\n CaseInsensitiveMapping\n\"\"\"\n\nHUNTERLAB_ILLUMINANTS = CaseInsensitiveMapping({\n 'CIE 1931 2 Degree Standard Observer':\n HUNTERLAB_ILLUMINANTS_CIE_1931_2_DEGREE_STANDARD_OBSERVER,\n 'CIE 1964 10 Degree Standard Observer':\n HUNTERLAB_ILLUMINANTS_CIE_1964_10_DEGREE_STANDARD_OBSERVER\n})\nHUNTERLAB_ILLUMINANTS.__doc__ = \"\"\"\nAggregated *Hunter L,a,b* illuminant dataset.\n\nReferences\n----------\n:cite:`HunterLab2008b`, :cite:`HunterLab2008c`\n\nHUNTERLAB_ILLUMINANTS : CaseInsensitiveMapping\n **{'CIE 1931 2 Degree Standard Observer',\n 'CIE 1964 10 Degree Standard Observer'}**\n\nAliases:\n\n- 'cie_2_1931': 'CIE 1931 2 Degree Standard Observer'\n- 'cie_10_1964': 'CIE 1964 10 Degree Standard Observer'\n\"\"\"\nHUNTERLAB_ILLUMINANTS['cie_2_1931'] = (\n HUNTERLAB_ILLUMINANTS['CIE 1931 2 Degree Standard Observer'])\nHUNTERLAB_ILLUMINANTS['cie_10_1964'] = (\n HUNTERLAB_ILLUMINANTS['CIE 1964 10 Degree Standard Observer'])\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
robertokcanale/ros_workspace_handsnet | [
"09672bf2b4c54d0064f339005dc5eb3ac4f9d80d"
] | [
"src/handsnet_time/src/tactile_image_publisher_1.py"
] | [
"#!/usr/bin/env python\nimport rospy\n#import tensorflow as tf\nfrom PIL import Image\nfrom sensor_msgs.msg import Image as TactileImage\nimport numpy as np\n\n\nif __name__ == '__main__':\n\n pub = rospy.Publisher('tactile_image1', TactileImage, queue_size=10)\n rospy.init_node('tactile_image_publisher1')\n rate = rospy.Rate(1000) # 1hz\n\n #publishing_my_image, in this case a loop of images\n while not rospy.is_shutdown():\n for i in range(1, 25):\n im_name='src/handsnet_time/data/'+str(i)+'.png'\n #PIL image\n im = Image.open(im_name)\n im = im.convert('RGB')\n im = im.resize((68,100), Image.ANTIALIAS) #I have to write it like this so that when the message is sent i get 100,68,3\n #sensor_msgs.msg.Image\n tactile_image = TactileImage()\n tactile_image.header.stamp = rospy.Time.now()\n tactile_image.height = im.height\n tactile_image.width = im.width\n tactile_image.encoding = \"rgb8\"\n tactile_image.is_bigendian = False\n tactile_image.step = 3 * im.width # Full row length in bytes\n tactile_image.data = np.array(im).tobytes()\n pub.publish(tactile_image)\n rate.sleep()\n #also, I need something of the kind PIL.Image.Image\n #tested it, and it wants a PIL image, don't forget to place the GPU stuff\n #model = tf.keras.models.load_model('src/handsnet/data/HandsNet_2_97.h5')\n #input_arr= tf.keras.preprocessing.image.img_to_array(im)\n #input_arr = np.array([input_arr]) \n #predictions = model.predict(input_arr)\n #print(predictions)\n\n\n\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
orionr/R2Plus1D | [
"54d93222f80cd23a87c7dbda6044b3fae07b3fda"
] | [
"tools/extract_features.py"
] | [
"# Copyright 2018-present, Facebook, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\nfrom __future__ import print_function\n\nfrom caffe2.python import workspace, cnn, core, data_parallel_model\nimport models.model_builder as model_builder\nimport utils.model_helper as model_helper\nimport utils.model_loader as model_loader\n\nimport numpy as np\nimport logging\nimport argparse\nimport os.path\nimport pickle\nimport sys\n\nfrom caffe2.proto import caffe2_pb2\n\nlogging.basicConfig()\nlog = logging.getLogger(\"feature_extractor\")\nlog.setLevel(logging.INFO)\n\n# Output logs to stdout as well, as they get lost in the ffmpeg read errors\nstdout_ch = logging.StreamHandler(sys.stdout)\nstdout_ch.setLevel(logging.INFO)\nlog.addHandler(stdout_ch)\n\n\ndef ExtractFeatures(args):\n if args.gpus is not None:\n gpus = [int(x) for x in args.gpus.split(',')]\n num_gpus = len(gpus)\n else:\n gpus = range(args.num_gpus)\n num_gpus = args.num_gpus\n\n if num_gpus > 0:\n log.info(\"Running on GPUs: {}\".format(gpus))\n else:\n log.info(\"Running on CPU\")\n\n log.info(\"Running on GPUs: {}\".format(gpus))\n\n my_arg_scope = {\n 'order': 'NCHW',\n 'use_cudnn': True,\n 'cudnn_exhaustive_search': True\n }\n\n model = cnn.CNNModelHelper(\n name=\"Extract Features\",\n **my_arg_scope\n )\n\n reader, num_examples = model_builder.create_data_reader(\n model,\n name=\"reader\",\n input_data=args.test_data,\n )\n\n def input_fn(model):\n model_helper.AddVideoInput(\n model,\n reader,\n batch_size=args.batch_size,\n clip_per_video=args.clip_per_video,\n decode_type=args.decode_type,\n length_rgb=args.clip_length_rgb,\n sampling_rate_rgb=args.sampling_rate_rgb,\n scale_h=args.scale_h,\n scale_w=args.scale_w,\n crop_size=args.crop_size,\n num_decode_threads=args.num_decode_threads,\n num_of_class=args.num_labels,\n random_mirror=False,\n random_crop=False,\n input_type=args.input_type,\n length_of=args.clip_length_of,\n sampling_rate_of=args.sampling_rate_of,\n frame_gap_of=args.frame_gap_of,\n do_flow_aggregation=args.do_flow_aggregation,\n flow_data_type=args.flow_data_type,\n get_rgb=(args.input_type == 0),\n get_optical_flow=(args.input_type == 1),\n get_video_id=args.get_video_id,\n use_local_file=args.use_local_file,\n )\n\n def create_model_ops(model, loss_scale):\n return model_builder.build_model(\n model=model,\n model_name=args.model_name,\n model_depth=args.model_depth,\n num_labels=args.num_labels,\n num_channels=args.num_channels,\n crop_size=args.crop_size,\n clip_length=(\n args.clip_length_of if args.input_type == 1\n else args.clip_length_rgb\n ),\n loss_scale=loss_scale,\n is_test=1,\n )\n\n if num_gpus > 0:\n data_parallel_model.Parallelize_GPU(\n model,\n input_builder_fun=input_fn,\n forward_pass_builder_fun=create_model_ops,\n param_update_builder_fun=None, # 'None' since we aren't training\n devices=gpus,\n )\n else:\n model._device_type = caffe2_pb2.CPU\n model._devices = 
[0]\n device_opt = core.DeviceOption(model._device_type, 0)\n with core.DeviceScope(device_opt):\n with core.NameScope(\"{}_{}\".format(\"gpu\", 0)):\n input_fn(model)\n create_model_ops(model, 1.0)\n\n workspace.RunNetOnce(model.param_init_net)\n workspace.CreateNet(model.net)\n\n if args.db_type == 'minidb':\n if num_gpus > 0:\n model_helper.LoadModel(args.load_model_path, args.db_type)\n data_parallel_model.FinalizeAfterCheckpoint(model)\n else:\n with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 0)):\n model_helper.LoadModel(args.load_model_path, args.db_type)\n elif args.db_type == 'pickle':\n if num_gpus > 0:\n model_loader.LoadModelFromPickleFile(\n model,\n args.load_model_path,\n use_gpu=True,\n root_gpu_id=gpus[0]\n )\n else:\n model_loader.LoadModelFromPickleFile(\n model,\n args.load_model_path,\n use_gpu=False,\n )\n else:\n log.warning(\"Unsupported db_type: {}\".format(args.db_type))\n\n def fetchActivations(model, outputs, num_iterations):\n\n all_activations = {}\n for counter in range(num_iterations):\n workspace.RunNet(model.net.Proto().name)\n num_devices = 1 # default for cpu\n if num_gpus > 0:\n num_devices = num_gpus\n\n for g in range(num_devices):\n for output_name in outputs:\n blob_name = 'gpu_{}/'.format(g) + output_name\n activations = workspace.FetchBlob(blob_name)\n if output_name not in all_activations:\n all_activations[output_name] = []\n all_activations[output_name].append(activations)\n\n if counter % 20 == 0:\n log.info('{}/{} iterations'.format(counter, num_iterations))\n\n # each key holds a list of activations obtained from each minibatch.\n # we now concatenate these lists to get the final arrays.\n # concatenating during the loop requires a realloc and can get slow.\n for key in all_activations:\n all_activations[key] = np.concatenate(all_activations[key])\n\n return all_activations\n\n outputs = [name.strip() for name in args.features.split(',')]\n assert len(outputs) > 0\n\n if args.num_iterations > 0:\n num_iterations = args.num_iterations\n else:\n if num_gpus > 0:\n examples_per_iteration = args.batch_size * num_gpus\n else:\n examples_per_iteration = args.batch_size\n num_iterations = int(num_examples / examples_per_iteration)\n\n activations = fetchActivations(model, outputs, num_iterations)\n\n # saving extracted features\n for index in range(len(outputs)):\n log.info(\n \"Read '{}' with shape {}\".format(\n outputs[index],\n activations[outputs[index]].shape\n )\n )\n\n if args.output_path:\n output_path = args.output_path\n else:\n output_path = os.path.dirname(args.test_data) + '/features.pickle'\n\n log.info('Writing to {}'.format(output_path))\n with open(output_path, 'wb') as handle:\n pickle.dump(activations, handle)\n\n # perform sanity check\n if args.sanity_check == 1: # check clip accuracy\n clip_acc = 0\n softmax = activations['softmax']\n label = activations['label']\n for i in range(len(softmax)):\n sorted_preds = \\\n np.argsort(softmax[i])\n sorted_preds[:] = sorted_preds[::-1]\n if sorted_preds[0] == label[i]:\n clip_acc += 1\n log.info('Sanity check --- clip accuracy: {}'.format(\n clip_acc / len(softmax))\n )\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"Simple feature extraction\"\n )\n parser.add_argument(\"--db_type\", type=str, default='pickle',\n help=\"Db type of the testing model\")\n parser.add_argument(\"--model_name\", type=str, default='r2plus1d',\n help=\"Model name\")\n parser.add_argument(\"--model_depth\", type=int, default=18,\n help=\"Model depth\")\n 
parser.add_argument(\"--gpus\", type=str, default=None,\n help=\"Comma separated list of GPU devices to use\")\n parser.add_argument(\"--num_gpus\", type=int, default=1,\n help=\"Number of GPU devices (instead of --gpus)\")\n parser.add_argument(\"--scale_h\", type=int, default=128,\n help=\"Scale image height to\")\n parser.add_argument(\"--scale_w\", type=int, default=171,\n help=\"Scale image width to\")\n parser.add_argument(\"--crop_size\", type=int, default=112,\n help=\"Input image size (to crop to)\")\n parser.add_argument(\"--clip_length_rgb\", type=int, default=4,\n help=\"Length of input clips\")\n parser.add_argument(\"--sampling_rate_rgb\", type=int, default=1,\n help=\"Frame sampling rate\")\n parser.add_argument(\"--num_labels\", type=int, default=101,\n help=\"Number of labels\")\n parser.add_argument(\"--num_channels\", type=int, default=3,\n help=\"Number of channels\")\n parser.add_argument(\"--batch_size\", type=int, default=32,\n help=\"Batch size, per-GPU\")\n parser.add_argument(\"--load_model_path\", type=str, default='',\n required=True,\n help=\"Load saved model for testing\")\n parser.add_argument(\"--test_data\", type=str, default=\"\", required=True,\n help=\"Dataset on which we will extract features\")\n parser.add_argument(\"--output_path\", type=str, default=\"\",\n help=\"Path to output pickle; defaults to \" +\n \"features.pickle next to <test_data>\")\n parser.add_argument(\"--use_cudnn\", type=int, default=1,\n help=\"Use CuDNN\")\n parser.add_argument(\"--features\", type=str, default=\"final_avg\",\n help=\"Comma-separated list of blob names to fetch\")\n parser.add_argument(\"--num_iterations\", type=int, default=-1,\n help=\"Run only this many iterations\")\n parser.add_argument(\"--num_decode_threads\", type=int, default=4,\n help=\"\")\n parser.add_argument(\"--clip_length_of\", type=int, default=8,\n help=\"Frames of optical flow data\")\n parser.add_argument(\"--sampling_rate_of\", type=int, default=2,\n help=\"Sampling rate for optial flows\")\n parser.add_argument(\"--frame_gap_of\", type=int, default=2,\n help=\"Frame gap of optical flows\")\n parser.add_argument(\"--input_type\", type=int, default=0,\n help=\"0=rgb, 1=optical flow\")\n parser.add_argument(\"--flow_data_type\", type=int, default=0,\n help=\"0=Flow2C, 1=Flow3C, 2=FlowWithGray, \" +\n \"3=FlowWithRGB\")\n parser.add_argument(\"--do_flow_aggregation\", type=int, default=0,\n help=\"whether to aggregate optical flow across \" +\n \"multiple frames\")\n parser.add_argument(\"--clip_per_video\", type=int, default=1,\n help=\"When clips_per_video > 1, sample this many \" +\n \"clips uniformly in time\")\n parser.add_argument(\"--get_video_id\", type=int, default=0,\n help=\"Output video id\")\n parser.add_argument(\"--sanity_check\", type=int, default=0,\n help=\"Sanity check on the accuracy/auc\")\n parser.add_argument(\"--decode_type\", type=int, default=2,\n help=\"0: random, 1: uniform sampling, \" +\n \"2: use starting frame\")\n parser.add_argument(\"--use_local_file\", type=int, default=0,\n help=\"Use lmdb as a list of local filenames\")\n\n args = parser.parse_args()\n log.info(args)\n\n assert model_builder.model_validation(\n args.model_name,\n args.model_depth,\n args.clip_length_of if args.input_type == 1 else args.clip_length_rgb,\n args.crop_size\n )\n\n ExtractFeatures(args)\n\n\nif __name__ == '__main__':\n workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])\n main()\n"
] | [
[
"numpy.concatenate",
"numpy.argsort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
quasiben/cucim | [
"048d53e6f99c2129b9febd08e0ae6d1b37d74451"
] | [
"python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py"
] | [
"import cupy as cp\nimport numpy as np\nimport pytest\nfrom cupy import testing\nfrom cupyx.scipy import ndimage as ndi\nfrom scipy import signal\n\nfrom cucim.skimage import restoration\nfrom cucim.skimage._shared.testing import fetch\nfrom cucim.skimage.color import rgb2gray\nfrom cucim.skimage.restoration import uft\n\n\ndef camera():\n import skimage\n import skimage.data\n\n return cp.asarray(skimage.img_as_float(skimage.data.camera()))\n\n\ndef astronaut():\n import skimage\n import skimage.data\n\n return cp.asarray(skimage.img_as_float(skimage.data.astronaut()))\n\n\ntest_img = camera()\n\n\[email protected]('dtype', [cp.float32, cp.float64])\ndef test_wiener(dtype):\n psf = np.ones((5, 5)) / 25\n data = signal.convolve2d(cp.asnumpy(test_img), psf, \"same\")\n np.random.seed(0)\n data += 0.1 * data.std() * np.random.standard_normal(data.shape)\n\n psf = cp.asarray(psf, dtype=dtype)\n data = cp.asarray(data, dtype=dtype)\n\n deconvolved = restoration.wiener(data, psf, 0.05)\n assert deconvolved.dtype == dtype\n\n path = fetch('restoration/tests/camera_wiener.npy')\n rtol = 1e-5 if dtype == np.float32 else 1e-12\n atol = rtol\n cp.testing.assert_allclose(\n deconvolved, np.load(path), rtol=rtol, atol=atol)\n\n _, laplacian = uft.laplacian(2, data.shape)\n otf = uft.ir2tf(psf, data.shape, is_real=False)\n deconvolved = restoration.wiener(data, otf, 0.05,\n reg=laplacian,\n is_real=False)\n cp.testing.assert_allclose(cp.real(deconvolved),\n np.load(path),\n rtol=rtol, atol=atol)\n\n\[email protected]('dtype', [cp.float32, cp.float64])\ndef test_unsupervised_wiener(dtype):\n psf = np.ones((5, 5)) / 25\n data = signal.convolve2d(cp.asnumpy(test_img), psf, 'same')\n np.random.seed(0)\n data += 0.1 * data.std() * np.random.standard_normal(data.shape)\n\n psf = cp.asarray(psf, dtype=dtype)\n data = cp.asarray(data, dtype=dtype)\n deconvolved, _ = restoration.unsupervised_wiener(data, psf)\n assert deconvolved.dtype == dtype\n\n # CuPy Backend: Cannot use the following comparison to scikit-image data\n # due to different random values generated by cp.random\n # within unsupervised_wiener.\n # Verified similar appearance qualitatively.\n # path = fetch(\"restoration/tests/camera_unsup.npy\")\n # cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)\n\n _, laplacian = uft.laplacian(2, data.shape)\n otf = uft.ir2tf(psf, data.shape, is_real=False)\n\n np.random.seed(0)\n deconvolved = restoration.unsupervised_wiener( # noqa\n data,\n otf,\n reg=laplacian,\n is_real=False,\n user_params={\"callback\": lambda x: None},\n )[0]\n\n # CuPy Backend: Cannot use the following comparison to scikit-image data\n # due to different random values generated by cp.random\n # within unsupervised_wiener.\n # Verified similar appearance qualitatively.\n # path = fetch(\"restoration/tests/camera_unsup2.npy\")\n # cp.testing.assert_allclose(cp.real(deconvolved), np.load(path), rtol=1e-3)\n\n\[email protected]_requires(\"skimage>=1.18\")\ndef test_image_shape():\n \"\"\"Test that shape of output image in deconvolution is same as input.\n\n This addresses issue #1172.\n \"\"\"\n point = cp.zeros((5, 5), np.float)\n point[2, 2] = 1.0\n psf = ndi.gaussian_filter(point, sigma=1.0)\n # image shape: (45, 45), as reported in #1172\n image = cp.asarray(test_img[65:165, 215:315]) # just the face\n image_conv = ndi.convolve(image, psf)\n deconv_sup = restoration.wiener(image_conv, psf, 1)\n deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]\n # test the shape\n assert image.shape == deconv_sup.shape\n 
assert image.shape == deconv_un.shape\n # test the reconstruction error\n sup_relative_error = cp.abs(deconv_sup - image) / image\n un_relative_error = cp.abs(deconv_un - image) / image\n cp.testing.assert_array_less(cp.median(sup_relative_error), 0.1)\n cp.testing.assert_array_less(cp.median(un_relative_error), 0.1)\n\n\ndef test_richardson_lucy():\n rstate = np.random.RandomState(0)\n psf = np.ones((5, 5)) / 25\n data = signal.convolve2d(cp.asnumpy(test_img), psf, 'same')\n np.random.seed(0)\n data += 0.1 * data.std() * rstate.standard_normal(data.shape)\n\n data = cp.asarray(data)\n psf = cp.asarray(psf)\n deconvolved = restoration.richardson_lucy(data, psf, 5)\n\n path = fetch('restoration/tests/camera_rl.npy')\n cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-5)\n\n\[email protected]('dtype_image', [np.float32, np.float64])\[email protected]('dtype_psf', [np.float32, np.float64])\[email protected]_requires(\"scikit-image>=0.18\")\ndef test_richardson_lucy_filtered(dtype_image, dtype_psf):\n if dtype_image == np.float64:\n atol = 1e-8\n else:\n atol = 1e-4\n\n test_img_astro = rgb2gray(astronaut())\n\n psf = cp.ones((5, 5), dtype=dtype_psf) / 25\n data = cp.array(\n signal.convolve2d(cp.asnumpy(test_img_astro), cp.asnumpy(psf), 'same'),\n dtype=dtype_image)\n deconvolved = restoration.richardson_lucy(data, psf, 5,\n filter_epsilon=1e-6)\n assert deconvolved.dtype == data.dtype\n\n path = fetch('restoration/tests/astronaut_rl.npy')\n cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3,\n atol=atol)\n"
] | [
[
"numpy.random.seed",
"numpy.random.standard_normal",
"numpy.ones",
"numpy.load",
"numpy.random.RandomState"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
biofreack/data_scrapper | [
"132edaaafce7f676f27f519883710f5e3345846b"
] | [
"data_scrapper/__main__.py"
] | [
"from bs4 import BeautifulSoup\nimport requests\nimport pandas as pd\n\nfrom .db_config import Session\nfrom .db_models import OhlcvdataModel\nfrom .db_config import engine\n\n#coin_symbols = [\"neo\", \"eos\", \"stellar\", \"tron\", \"cardano\", \"monero\", \"nem\", \"siacoin\", \"verge\", \"digibyte\", \"stratis\", \"reddcoin\", \"dash\", \"zcash\", \"tezos\", \"vechain\"]\ncoin_symbols = [\"bitcoin\", \"ethereum\", \"ethereum-classic\", \"iota\", \"ripple\", \"litecoin\"]\n\ndef tes_insert():\n print('Hello World')\n\n session = Session()\n new_rec = OhlcvdataModel(\n name='bitfinex'\n )\n session.add(new_rec)\n session.commit()\n\n\ndef add_record(line):\n session = Session()\n new_rec = OhlcvdataModel(\n name='bitfinex'\n )\n session.add(new_rec)\n session.commit()\n\ndef main():\n for coin in coin_symbols:\n print(f\"coin are valoarea {coin}\")\n url = f\"https://coinmarketcap.com/currencies/{coin}/historical-data/?start=20130428&end=20190111\"\n content = requests.get(url).content\n soup = BeautifulSoup(content,'html.parser')\n table = soup.find('table', {'class': 'table'})\n \n data = [[td.text.strip() for td in tr.findChildren('td')]\n for tr in table.findChildren('tr')]\n df = pd.DataFrame(data)\n df.drop(df.index[0], inplace=True) # first row is empty\n df[0] = pd.to_datetime(df[0]) # date\n for i in range(1,7):\n df[i] = pd.to_numeric(df[i].str.replace(\",\",\"\").str.replace(\"-\",\"\")) # some vol is missing and has -\n df.columns = ['Date','Open','High','Low','Close','Volume','Market Cap']\n # import pdb;pdb.set_trace()\n df.set_index('Date',inplace=True)\n df.sort_index(inplace=True)\n df.insert(loc=0, column='name', value=coin)\n excel_name = f'/home/husky/coincapmarket/{coin}.xls'\n export_excel = df.to_excel (excel_name, index = True, header=True)\n df.to_sql('ohlcvdata2', engine, if_exists='append')\n\nif __name__ == '__main__':\n main()\n\n"
] | [
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
raybellwaves/intake | [
"8acc70d9adb19344ca15dee948315828b61e87b2"
] | [
"intake/catalog/tests/test_local.py"
] | [
"#-----------------------------------------------------------------------------\n# Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors\n# All rights reserved.\n#\n# The full license is in the LICENSE file, distributed with this software.\n#-----------------------------------------------------------------------------\n\nimport datetime\nimport os.path\nimport shutil\nimport tempfile\nimport time\n\nimport pytest\n\nimport pandas\n\nfrom .util import assert_items_equal\nfrom intake import open_catalog\nfrom intake.catalog import exceptions, local\nfrom intake.catalog.local import get_dir, UserParameter, LocalCatalogEntry\nfrom intake.utils import make_path_posix\n\n\ndef abspath(filename):\n return make_path_posix(\n os.path.join(os.path.dirname(__file__), filename))\n\n\ndef test_local_catalog(catalog1):\n assert_items_equal(list(catalog1),\n ['use_example1', 'nested', 'entry1', 'entry1_part',\n 'remote_env', 'local_env', 'text', 'arr', 'datetime'])\n assert len(catalog1) == 9\n assert catalog1['entry1'].describe() == {\n 'name': 'entry1',\n 'container': 'dataframe',\n 'direct_access': 'forbid',\n 'user_parameters': [],\n 'description': 'entry1 full',\n 'args': {'urlpath': '{{ CATALOG_DIR }}/entry1_*.csv'},\n 'metadata': {'bar': [1, 2, 3], 'foo': 'bar'},\n 'plugin': ['csv'],\n 'driver': ['csv']\n }\n assert catalog1['entry1_part'].describe() == {\n 'name': 'entry1_part',\n 'container': 'dataframe',\n 'user_parameters': [\n {\n 'name': 'part',\n 'description': 'part of filename',\n 'default': '1',\n 'type': 'str',\n 'allowed': ['1', '2'],\n }\n ],\n 'description': 'entry1 part',\n 'direct_access': 'allow',\n 'args': {'urlpath': '{{ CATALOG_DIR }}/entry1_{{ part }}.csv'},\n 'metadata': {'foo': 'baz', 'bar': [2, 4, 6]},\n 'plugin': ['csv'],\n 'driver': ['csv']\n }\n assert catalog1['entry1'].container == 'dataframe'\n md = catalog1['entry1'].metadata\n md.pop('catalog_dir')\n assert md['foo'] == 'bar'\n assert md['bar'] == [1, 2, 3]\n\n # Use default parameters\n assert catalog1['entry1_part'].container == 'dataframe'\n # Specify parameters\n assert catalog1['entry1_part'].configure_new(part='2').container == 'dataframe'\n\n\ndef test_get_items(catalog1):\n for key, entry in catalog1.items():\n assert catalog1[key].describe() == entry.describe()\n\n\ndef test_nested(catalog1):\n assert 'nested' in catalog1\n assert 'entry1' in catalog1.nested.nested()\n assert catalog1.entry1.read().equals(catalog1.nested.nested.entry1.read())\n assert 'nested.nested' not in catalog1.walk(depth=1)\n assert 'nested.nested' in catalog1.walk(depth=2)\n assert catalog1.nested.cat == catalog1\n assert catalog1.nested.nested.nested.cat.cat.cat is catalog1\n\n\ndef test_nested_gets_name_from_super(catalog1):\n assert catalog1.name == 'name_in_cat'\n assert 'nested' in catalog1\n nested = catalog1.nested\n assert nested.name == 'nested'\n assert nested().name == 'nested'\n\n\ndef test_hash(catalog1):\n assert catalog1.nested() == catalog1.nested.nested()\n\n\ndef test_getitem(catalog1):\n assert list(catalog1) == list(catalog1['nested']())\n assert list(catalog1) == list(catalog1['nested.nested']())\n assert list(catalog1) == list(catalog1['nested', 'nested']())\n\n\ndef test_source_plugin_config(catalog1):\n from intake import registry\n assert 'example1' in registry\n assert 'example2' in registry\n\n\ndef test_metadata(catalog1):\n assert hasattr(catalog1, 'metadata')\n assert catalog1.metadata['test'] is True\n\n\ndef test_use_source_plugin_from_config(catalog1):\n catalog1['use_example1']\n\n\ndef 
test_get_dir():\n assert get_dir('file:///path/catalog.yml') == 'file:///path'\n assert get_dir('https://example.com/catalog.yml') == 'https://example.com'\n path = 'example/catalog.yml'\n out = get_dir(path)\n assert os.path.isabs(out)\n assert out.endswith('/example/')\n path = '/example/catalog.yml'\n out = get_dir(path)\n # it's ok if the first two chars indicate drive for win (C:)\n assert '/example/' in [out, out[2:]]\n path = 'example'\n out = get_dir(path)\n assert os.path.isabs(out)\n assert not out.endswith('/example')\n assert out.endswith('/')\n\n\ndef test_entry_dir_function(catalog1):\n assert 'nested' in dir(catalog1.nested)\n\n\[email protected](\"dtype,expected\", [\n (\"bool\", False),\n (\"datetime\", pandas.Timestamp(1970, 1, 1, 0, 0, 0)),\n (\"float\", 0.0),\n (\"int\", 0),\n (\"list\", []),\n (\"str\", \"\"),\n (\"unicode\", u\"\"),\n])\ndef test_user_parameter_default_value(dtype, expected):\n p = local.UserParameter('a', 'a desc', dtype)\n assert p.validate(None) == expected\n\n\ndef test_user_parameter_repr():\n p = local.UserParameter('a', 'a desc', 'str')\n expected = \"<UserParameter 'a'>\"\n assert repr(p) == str(p) == expected\n\n\[email protected](\"dtype,given,expected\", [\n (\"bool\", \"true\", True),\n (\"bool\", 0, False),\n (\"datetime\", datetime.datetime(2018, 1, 1, 0, 34, 0), pandas.Timestamp(2018, 1, 1, 0, 34, 0)),\n (\"datetime\", \"2018-01-01 12:34AM\", pandas.Timestamp(2018, 1, 1, 0, 34, 0)),\n (\"datetime\", 1234567890000000000, pandas.Timestamp(2009, 2, 13, 23, 31, 30)),\n (\"float\", \"3.14\", 3.14),\n (\"int\", \"1\", 1),\n (\"list\", (3, 4), [3, 4]),\n (\"str\", 1, \"1\"),\n (\"unicode\", \"foo\", u\"foo\"),\n])\ndef test_user_parameter_coerce_value(dtype, given, expected):\n p = local.UserParameter('a', 'a desc', dtype, given)\n assert p.validate(given) == expected\n\n\[email protected](\"given\", [\"now\", \"today\"])\ndef test_user_parameter_coerce_special_datetime(given):\n p = local.UserParameter('a', 'a desc', 'datetime', given)\n assert type(p.validate(given)) == pandas.Timestamp\n\n\[email protected](\"dtype,given,expected\", [\n (\"float\", \"100.0\", 100.0),\n (\"int\", \"20\", 20),\n (\"int\", 20.0, 20),\n])\ndef test_user_parameter_coerce_min(dtype, given, expected):\n p = local.UserParameter('a', 'a desc', dtype, expected, min=given)\n assert p.min == expected\n\n\[email protected](\"dtype,given,expected\", [\n (\"float\", \"100.0\", 100.0),\n (\"int\", \"20\", 20),\n (\"int\", 20.0, 20),\n])\ndef test_user_parameter_coerce_max(dtype, given, expected):\n p = local.UserParameter('a', 'a desc', dtype, expected, max=given)\n assert p.max == expected\n\n\[email protected](\"dtype,given,expected\", [\n (\"float\", [50, \"100.0\", 150.0], [50.0, 100.0, 150.0]),\n (\"int\", [1, \"2\", 3.0], [1, 2, 3]),\n])\ndef test_user_parameter_coerce_allowed(dtype, given, expected):\n p = local.UserParameter('a', 'a desc', dtype, expected[0], allowed=given)\n assert p.allowed == expected\n\n\ndef test_user_parameter_validation_range():\n p = local.UserParameter('a', 'a desc', 'int', 1, min=0, max=3)\n\n with pytest.raises(ValueError) as except_info:\n p.validate(-1)\n assert 'less than' in str(except_info.value)\n\n assert p.validate(0) == 0\n assert p.validate(1) == 1\n assert p.validate(2) == 2\n assert p.validate(3) == 3\n\n with pytest.raises(ValueError) as except_info:\n p.validate(4)\n assert 'greater than' in str(except_info.value)\n\n\ndef test_user_parameter_validation_allowed():\n p = local.UserParameter('a', 'a desc', 'int', 1, 
allowed=[1, 2])\n\n with pytest.raises(ValueError) as except_info:\n p.validate(0)\n assert 'allowed' in str(except_info.value)\n\n assert p.validate(1) == 1\n assert p.validate(2) == 2\n\n with pytest.raises(ValueError) as except_info:\n p.validate(3)\n assert 'allowed' in str(except_info.value)\n\n\[email protected](\"filename\", [\n \"catalog_non_dict\",\n \"data_source_missing\",\n \"data_source_name_non_string\",\n \"data_source_non_dict\",\n \"data_source_value_non_dict\",\n \"params_missing_required\",\n \"params_name_non_string\",\n \"params_non_dict\",\n \"params_value_bad_choice\",\n \"params_value_bad_type\",\n \"params_value_non_dict\",\n \"plugins_non_dict\",\n \"plugins_source_missing\",\n \"plugins_source_missing_key\",\n \"plugins_source_non_dict\",\n \"plugins_source_non_list\",\n])\ndef test_parser_validation_error(filename):\n with pytest.raises(exceptions.ValidationError):\n list(open_catalog(abspath(filename + \".yml\")))\n\n\[email protected](\"filename\", [\n \"obsolete_data_source_list\",\n \"obsolete_params_list\",\n])\ndef test_parser_obsolete_error(filename):\n with pytest.raises(exceptions.ObsoleteError):\n open_catalog(abspath(filename + \".yml\"))\n\n\ndef test_union_catalog():\n path = os.path.dirname(__file__)\n uri1 = os.path.join(path, 'catalog_union_1.yml')\n uri2 = os.path.join(path, 'catalog_union_2.yml')\n\n union_cat = open_catalog([uri1, uri2])\n\n assert_items_equal(list(union_cat), ['entry1', 'entry1_part', 'use_example1'])\n\n expected = {\n 'name': 'entry1_part',\n 'container': 'dataframe',\n 'user_parameters': [\n {\n 'name': 'part',\n 'description': 'part of filename',\n 'default': '1',\n 'type': 'str',\n 'allowed': ['1', '2'],\n }\n ],\n 'description': 'entry1 part',\n 'direct_access': 'allow'\n }\n for k in expected:\n assert union_cat.entry1_part.describe()[k] == expected[k]\n\n # Implied creation of data source\n assert union_cat.entry1.container == 'dataframe'\n md = union_cat.entry1.describe()['metadata']\n assert md == dict(foo='bar', bar=[1, 2, 3])\n\n # Use default parameters in explict creation of data source\n assert union_cat.entry1_part().container == 'dataframe'\n # Specify parameters in creation of data source\n assert union_cat.entry1_part(part='2').container == 'dataframe'\n\n\ndef test_persist_local_cat(temp_cache):\n # when persisted, multiple cat become one\n from intake.catalog.local import YAMLFileCatalog\n path = os.path.dirname(__file__)\n uri1 = os.path.join(path, 'catalog_union_1.yml')\n uri2 = os.path.join(path, 'catalog_union_2.yml')\n\n s = open_catalog([uri1, uri2])\n s2 = s.persist()\n assert isinstance(s2, YAMLFileCatalog)\n assert set(s) == set(s2)\n\n\ndef test_empty_catalog():\n cat = open_catalog()\n assert list(cat) == []\n\n\ndef test_nonexistent_error():\n with pytest.raises(IOError):\n local.YAMLFileCatalog('nonexistent')\n\n\ndef test_duplicate_data_sources():\n path = os.path.dirname(__file__)\n uri = os.path.join(path, 'catalog_dup_sources.yml')\n\n with pytest.raises(exceptions.DuplicateKeyError):\n open_catalog(uri)\n\n\ndef test_duplicate_parameters():\n path = os.path.dirname(__file__)\n uri = os.path.join(path, 'catalog_dup_parameters.yml')\n\n with pytest.raises(exceptions.DuplicateKeyError):\n open_catalog(uri)\n\n\[email protected]\ndef temp_catalog_file():\n path = tempfile.mkdtemp()\n catalog_file = os.path.join(path, 'catalog.yaml')\n with open(catalog_file, 'w') as f:\n f.write('''\nsources:\n a:\n driver: csv\n args:\n urlpath: /not/a/file\n b:\n driver: csv\n args:\n urlpath: 
/not/a/file\n ''')\n\n yield catalog_file\n\n shutil.rmtree(path)\n\n\ndef test_catalog_file_removal(temp_catalog_file):\n cat_dir = os.path.dirname(temp_catalog_file)\n cat = open_catalog(cat_dir + '/*', ttl=0.1)\n assert set(cat) == {'a', 'b'}\n\n os.remove(temp_catalog_file)\n time.sleep(0.5) # wait for catalog refresh\n assert set(cat) == set()\n\n\ndef test_flatten_duplicate_error():\n path = tempfile.mkdtemp()\n f1 = os.path.join(path, 'catalog.yaml')\n path = tempfile.mkdtemp()\n f2 = os.path.join(path, 'catalog.yaml')\n for f in [f1, f2]:\n with open(f, 'w') as fo:\n fo.write(\"\"\"\n sources:\n a:\n driver: csv\n args:\n urlpath: /not/a/file\n \"\"\")\n with pytest.raises(ValueError):\n open_catalog([f1, f2])\n\n\ndef test_multi_cat_names():\n fn = abspath(\"catalog_union*.yml\")\n cat = open_catalog(fn)\n assert cat.name == fn\n assert fn in repr(cat)\n\n fn1 = abspath(\"catalog_union_1.yml\")\n fn2 = abspath(\"catalog_union_2.yml\")\n cat = open_catalog([fn1, fn2])\n assert cat.name == '2 files'\n assert cat.description == 'Catalog generated from 2 files'\n\n cat = open_catalog([fn1, fn2], name='special_name',\n description='Special description')\n assert cat.name == 'special_name'\n assert cat.description == 'Special description'\n\n\ndef test_name_of_builtin():\n import intake\n assert intake.cat.name == 'builtin'\n assert intake.cat.description == 'Generated from data packages found on your intake search path'\n\n\ndef test_cat_with_declared_name():\n fn = abspath(\"catalog_named.yml\")\n description = 'Description declared in the open function'\n cat = open_catalog(fn, name='name_in_func', description=description)\n assert cat.name == 'name_in_func'\n assert cat.description == description\n cat._load() # we don't get metadata until load/list/getitem\n assert cat.metadata.get('some') == 'thing'\n\n cat = open_catalog(fn)\n assert cat.name == 'name_in_spec'\n assert cat.description == 'This is a catalog with a description in the yaml'\n\n\ndef test_cat_with_no_declared_name_gets_name_from_dir_if_file_named_catalog():\n fn = abspath(\"catalog.yml\")\n cat = open_catalog(fn, name='name_in_func', description='Description in func')\n assert cat.name == 'name_in_func'\n assert cat.description == 'Description in func'\n\n cat = open_catalog(fn)\n assert cat.name == 'tests'\n assert cat.description == None\n\n\ndef test_default_expansions():\n try:\n os.environ['INTAKE_INT_TEST'] = '1'\n par = UserParameter('', '', 'int', default='env(INTAKE_INT_TEST)')\n par.expand_defaults()\n assert par.expanded_default == 1\n finally:\n del os.environ['INTAKE_INT_TEST']\n\n par = UserParameter('', '', 'str', default='env(USER)')\n par.expand_defaults(getenv=False)\n assert par.expanded_default == 'env(USER)'\n par.expand_defaults()\n assert par.expanded_default == os.getenv('USER', '')\n\n par = UserParameter('', '', 'str', default='client_env(USER)')\n par.expand_defaults()\n assert par.expanded_default == 'client_env(USER)'\n par.expand_defaults(client=True)\n assert par.expanded_default == os.getenv('USER', '')\n\n par = UserParameter('', '', 'str', default='shell(echo success)')\n par.expand_defaults(getshell=False)\n assert par.expanded_default == 'shell(echo success)'\n par.expand_defaults()\n assert par.expanded_default == 'success'\n\n par = UserParameter('', '', 'str', default='client_shell(echo success)')\n par.expand_defaults(client=True)\n assert par.expanded_default == 'success'\n\n par = UserParameter('', '', 'int', default=1)\n par.expand_defaults() # no error from string 
ops\n\n\ndef test_remote_cat(http_server):\n url = http_server + 'catalog1.yml'\n cat = open_catalog(url)\n assert 'entry1' in cat\n assert cat.entry1.describe()\n\n\ndef test_multi_plugins():\n from intake.source.csv import CSVSource\n fn = abspath('multi_plugins.yaml')\n cat = open_catalog(fn)\n s = cat.tables0()\n assert isinstance(s, CSVSource)\n\n s = cat.tables1()\n assert isinstance(s, CSVSource)\n\n s = cat.tables2()\n assert isinstance(s, CSVSource)\n\n s = cat.tables3()\n assert isinstance(s, CSVSource)\n assert s._csv_kwargs == {}\n\n s = cat.tables3(plugin='myplug')\n assert isinstance(s, CSVSource)\n assert s._csv_kwargs == {}\n\n s = cat.tables3(plugin='myplug2')\n assert isinstance(s, CSVSource)\n assert s._csv_kwargs is True\n\n with pytest.raises(ValueError):\n cat.tables4()\n with pytest.raises(ValueError):\n cat.tables4(plugin='myplug')\n with pytest.raises(ValueError):\n cat.tables4(plugin='myplug2')\n\n s = cat.tables5()\n assert isinstance(s, CSVSource)\n\n with pytest.raises(ValueError):\n cat.tables5(plugin='myplug')\n\n fn = abspath('multi_plugins2.yaml')\n with pytest.raises(ValueError):\n open_catalog(fn)\n\n\ndef test_no_plugins():\n fn = abspath('multi_plugins.yaml')\n cat = open_catalog(fn)\n with pytest.raises(ValueError) as e:\n cat.tables6\n assert 'doesnotexist' in str(e.value)\n assert 'plugin-directory' in str(e.value)\n with pytest.raises(ValueError) as e:\n cat.tables7\n assert 'doesnotexist' in str(e.value)\n\n\ndef test_explicit_entry_driver():\n from intake.source.textfiles import TextFilesSource\n e = LocalCatalogEntry('test', 'desc', TextFilesSource,\n args={'urlpath': None})\n assert e.describe()['container'] == 'python'\n assert isinstance(e(), TextFilesSource)\n\n with pytest.raises(TypeError):\n LocalCatalogEntry('test', 'desc', None)\n\n\ndef test_getitem_and_getattr():\n fn = abspath('multi_plugins.yaml')\n catalog = open_catalog(fn)\n catalog['tables0']\n with pytest.raises(KeyError):\n catalog['doesnotexist']\n with pytest.raises(KeyError):\n catalog['_doesnotexist']\n with pytest.raises(KeyError):\n # This exists as an *attribute* but not as an item.\n catalog['metadata']\n catalog.tables0 # alias to catalog['tables0']\n catalog.metadata # a normal attribute\n with pytest.raises(AttributeError):\n catalog.doesnotexit\n with pytest.raises(AttributeError):\n catalog._doesnotexit\n assert catalog.tables0 == catalog['tables0']\n assert isinstance(catalog.metadata, (dict, type(None)))\n\n\ndef test_dot_names():\n fn = abspath('dot-nest.yaml')\n cat = open_catalog(fn)\n assert cat.self.leaf.description == 'leaf'\n assert cat.self['leafdot.dot'].description == 'leaf-dot'\n assert cat['selfdot.dot', 'leafdot.dot'].description == 'leaf-dot'\n\n assert cat['self.selfdot.dot', 'leafdot.dot'].description == 'leaf-dot'\n assert cat['self.self.dot', 'leafdot.dot'].description == 'leaf-dot'\n assert cat['self.self.dot', 'leaf'].description == 'leaf'\n assert cat['self.self.dot', 'leaf.dot'].description == 'leaf-dot'\n\n assert cat['self.self.dot.leaf.dot'].description == 'leaf-dot'\n\n\ndef test_listing(catalog1):\n assert list(catalog1) == list(catalog1.nested)\n with pytest.raises(TypeError):\n list(catalog1.arr)\n\n\ndef test_dict_save():\n from intake.catalog.base import Catalog\n fn = os.path.join(tempfile.mkdtemp(), 'mycat.yaml')\n entry = LocalCatalogEntry(name='trial', description='get this back',\n driver='csv', args=dict(urlpath=\"\"))\n cat = Catalog.from_dict({'trial': entry}, name='mycat')\n cat.save(fn)\n\n cat2 = open_catalog(fn)\n assert 
'trial' in cat2\n assert cat2.name == 'mycat'\n assert \"CSV\" in cat2.trial.classname\n\n\ndef test_dict_save_complex():\n from intake.catalog.base import Catalog\n fn = os.path.join(tempfile.mkdtemp(), 'mycat.yaml')\n cat = Catalog()\n entry = LocalCatalogEntry(name='trial', description='get this back',\n driver='csv', cache=[], catalog=cat,\n parameters=[UserParameter(name='par1', description='desc', type='int')],\n args={'urlpath': 'none'})\n\n cat._entries = {'trial': entry}\n cat.save(fn)\n\n cat2 = open_catalog(fn)\n assert 'trial' in cat2\n assert cat2.name == 'mycat'\n assert cat2.trial.describe()['plugin'][0] == 'csv'\n\n\ndef test_dict_adddel():\n from intake.catalog.base import Catalog\n entry = LocalCatalogEntry(name='trial', description='get this back',\n driver='csv', args=dict(urlpath=\"\"))\n cat = Catalog.from_dict({'trial': entry}, name='mycat')\n assert 'trial' in cat\n cat['trial2'] = entry\n assert list(cat) == ['trial', 'trial2']\n cat.pop('trial')\n assert list(cat) == ['trial2']\n assert cat['trial2'].describe() == entry.describe()\n\n\ndef test_filter():\n from intake.catalog.base import Catalog\n entry1 = LocalCatalogEntry(name='trial', description='get this back',\n driver='csv', args=dict(urlpath=\"\"))\n entry2 = LocalCatalogEntry(name='trial', description='pass this through',\n driver='csv', args=dict(urlpath=\"\"))\n cat = Catalog.from_dict({'trial1': entry1,\n 'trial2': entry2}, name='mycat')\n cat2 = cat.filter(lambda e: 'pass' in e._description)\n assert list(cat2) == ['trial2']\n assert cat2.trial2 == entry2()\n\n\ndef test_from_dict_with_data_source():\n \"Check that Catalog.from_dict accepts DataSources not wrapped in Entry.\"\n from intake.catalog.base import Catalog\n fn = os.path.join(tempfile.mkdtemp(), 'mycat.yaml')\n entry = LocalCatalogEntry(name='trial', description='get this back',\n driver='csv', args=dict(urlpath=\"\"))\n ds = entry()\n cat = Catalog.from_dict({'trial': ds}, name='mycat')\n\n\ndef test_no_instance():\n from intake.catalog.local import LocalCatalogEntry\n\n e0 = LocalCatalogEntry('foo', '', 'fake')\n e1 = LocalCatalogEntry('foo0', '', 'fake')\n\n # this would error on instantiation with driver not found\n assert e0 != e1\n\n\ndef test_fsspec_integration():\n import fsspec\n import pandas as pd\n mem = fsspec.filesystem('memory')\n with mem.open('cat.yaml', 'wt') as f:\n f.write(\"\"\"\nsources:\n implicit:\n driver: csv\n description: o\n args:\n urlpath: \"{{CATALOG_DIR}}/file.csv\"\n explicit:\n driver: csv\n description: o\n args:\n urlpath: \"memory:///file.csv\"\n extra:\n driver: csv\n description: o\n args:\n urlpath: \"{{CATALOG_DIR}}/file.csv\"\n storage_options: {other: option}\"\"\"\n )\n with mem.open('/file.csv', 'wt') as f:\n f.write(\"a,b\\n0,1\")\n expected = pd.DataFrame({'a': [0], 'b': [1]})\n cat = open_catalog(\"memory://cat.yaml\")\n assert list(cat) == ['implicit', 'explicit', 'extra']\n assert cat.implicit.read().equals(expected)\n assert cat.explicit.read().equals(expected)\n s = cat.extra()\n assert s._storage_options['other']\n\n\ndef test_cat_add(tmpdir):\n tmpdir = str(tmpdir)\n fn = os.path.join(tmpdir, 'cat.yaml')\n with open(fn, 'w') as f:\n f.write('sources: {}')\n cat = open_catalog(fn)\n assert list(cat) == []\n\n # was added in memory\n cat.add(cat)\n cat._load() # this would happen automatically, but not immediately\n assert list(cat) == ['cat']\n\n # was added to the file\n cat = open_catalog(fn)\n assert list(cat) == ['cat']\n\n\ndef test_no_entries_items(catalog1):\n from 
intake.catalog.entry import CatalogEntry\n from intake.source.base import DataSource\n\n for k, v in catalog1.items():\n assert not isinstance(v, CatalogEntry)\n assert isinstance(v, DataSource)\n\n for k in catalog1:\n v = catalog1[k]\n assert not isinstance(v, CatalogEntry)\n assert isinstance(v, DataSource)\n\n for k in catalog1:\n # we can't do attribute access on \"text\" because it\n # collides with a property\n if k == 'text':\n continue\n v = getattr(catalog1, k)\n assert not isinstance(v, CatalogEntry)\n assert isinstance(v, DataSource)\n\n\ndef test_cat_dictlike(catalog1):\n assert list(catalog1) == list(catalog1.keys())\n assert len(list(catalog1)) == len(catalog1)\n assert list(catalog1.items()) == list(zip(catalog1.keys(), catalog1.values()))\n"
] | [
[
"pandas.Timestamp",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Kchour/steinerpy | [
"be6206533b7b28cfb67800ee847f0de367dab834"
] | [
"steinerpy/library/animation/animationV2.py"
] | [
"\"\"\"This module provides a helper class for using matplotlib animations \"\"\"\nimport matplotlib.pyplot as plt\nimport time\n\nimport steinerpy.config as cfg\n \nclass AnimateV2:\n \"\"\"Helper class for matplotlib.pyplot animations\n Class instances are used to keep track of different figures and their\n respective artists\n \n Args:\n figure_number (int): Figure number to put our canvas on\n figure_name (str): NOT IMPLEMENTED YET\n\n Attributes:\n figure_number\n figure_name (str): NOT IMPLEMENTED YET\n background (bbox): The canvas background\n artists (dict): Container of different artists\n canvas (???): The current figure's canvas\n cid (???): Callback object for \"draw_event\"\n\n Todo:\n * Add support for subplots\n * Test usage multiple figures\n\n \"\"\"\n # keep track of figure instances\n instances = {}\n\n def __init__(self, figure_number, figure_name=\"\"):\n self.figure_number = figure_number\n self.figure_name = figure_name\n self.background = None\n self.artists = {}\n\n # grab the background on every draw \n fig = plt.figure(figure_number)\n self.ax = fig.axes[0]\n self.canvas = fig.canvas\n self.cid = self.canvas.mpl_connect(\"draw_event\", self._on_draw)\n\n def _add_artists(self, artist, artist_name, use_line=True):\n if use_line:\n self.artists[artist_name] = {'artist': artist, 'xdata': [], 'ydata': []} \n else:\n self.artists[artist_name] = {'artist': [artist]} \n\n def _on_draw(self, event):\n cv = self.canvas\n fig = cv.figure\n if event is not None:\n if event.canvas != cv:\n raise RuntimeError\n self.background = cv.copy_from_bbox(cv.figure.bbox)\n self._draw_animated()\n cv.blit(fig.bbox)\n\n def _draw_animated(self):\n \"\"\"Draw all of the animated artists.\"\"\"\n fig = self.canvas.figure\n # for a in self.artists.values():\n # fig.draw_artist(a['artist'][0])\n sorted_artist = sorted(self.artists.values(), key=lambda x: x['artist'][0].get_zorder())\n # for a in cls.instances[figure_number].artists.values():\n for a in sorted_artist:\n # Draw artists\n fig.draw_artist(a['artist'][0])\n\n @classmethod\n def get_artist(cls, artist_name, figure_number=1):\n if artist_name in cls.instances[figure_number].artists:\n return cls.instances[figure_number].artists[artist_name]['artist'][0]\n\n @classmethod\n def delete(cls, artist_name, figure_number=1):\n \"\"\"Removes a particular artist from both this class and the axes\"\"\"\n if artist_name in cls.instances[figure_number].artists:\n cls.instances[figure_number].artists[artist_name]['artist'][0].remove()\n del cls.instances[figure_number].artists[artist_name]\n\n # helper method for different user inputs\n @classmethod\n def add_line(cls, *args, **kwargs):\n \"\"\"Add an line artist to a particular class instance \n\n Examples:\n Let x, y be single float values or a list of floats\n >>> AnimateV2.add(\"cos\", x, y, 'bo', markersize=15, zorder=10) #on top\n\n Let d be a 2D list of floats, i.e. 
[[x1, x2, ...],[y1, y2, ...]]\n >>> AnimateV2.add('cos', d, markersize=5, marker='o')\n >>> AnimateV2.add('cos', d, 'ro', markersize=5)\n\n \"\"\"\n\n if isinstance(args[-1], str):\n #using fmt arguments\n \n # compact?\n if len(args[0:-1]) == 2:\n artist_name, data = args[0], args[1]\n x,y = data[0], data[1]\n\n cls._add(artist_name, x,y, args[-1], **kwargs)\n else:\n #not compact?\n cls._add(*args, **kwargs)\n else:\n #not using fmt args\n \n # compact?\n if len(args)==2:\n artist_name, data = args\n x,y = data[0], data[1]\n cls._add(artist_name, x, y, **kwargs)\n else:\n #not compact\n artist_name, x, y = args\n cls._add(artist_name, x, y, **kwargs)\n\n @classmethod\n def create_new_plot(cls, *args, **kwargs):\n \"\"\"Return fig, ax from subplots function \"\"\"\n \n return plt.subplots(*args, **kwargs) \n\n @classmethod\n def init_figure(cls, fig, ax, figure_number=1, figure_name=\"\", xlim=None, ylim=None):\n \"\"\"Allow the user to manually initialize the figure, they must pass in handles\n\n \"\"\"\n\n #resize to prevent marker clipping on the edges\n if xlim != None and ylim != None:\n ax.set_xlim(xlim[0], xlim[1])\n ax.set_ylim(ylim[0], ylim[1])\n cls.prevent_clipping()\n\n # try setting equal axis?\n ax.axis('equal')\n\n plt.show(block=False) \n plt.pause(0.1)\n\n # Store the background in new class instance\n o = AnimateV2(figure_number=1, figure_name=figure_name)\n # o.background = fig.canvas.copy_from_bbox(ax.bbox)\n cls.instances[figure_number] = o\n\n @classmethod\n def _add(cls, artist_name, x, y, *args, figure_number=1, figure_name=\"\", xlim=None, ylim=None, draw_clean=False, linestyle=\"\", alpha=1, **kwargs):\n \"\"\"Add line2d artist and its data to a particular figure \n\n Args:\n artist_name (str): Name of the line2d artist\n x (list of floats, float): The line2d xdata\n y (list of floats, float): The line2d ydata\n args (str): Format arguments for plot \n xlim (tuple): (xmin, xmax)\n ylim (tuple): (ymin, ymax)\n \n \"\"\"\n # initialization event.canvas.figure.axes[0].has_been_closed = True\n if not plt.fignum_exists(figure_number):\n # Get figure\n fig = plt.figure(figure_number)\n # Add axes\n ax = fig.add_subplot(1,1,1)\n # set limits\n if xlim is None or ylim is None:\n xlim = (-15,15)\n ylim = (-15,15)\n ax.set_xlim(xlim[0], xlim[1])\n ax.set_ylim(ylim[0], ylim[1])\n\n #resize to prevent marker clipping on the edges\n cls.prevent_clipping()\n\n # Draw the canvas once\n # fig.canvas.draw_idle()\n plt.legend() # must have already defined this\n plt.show(block=False) \n plt.pause(0.1)\n\n # Store the background in new class instance\n o = AnimateV2(figure_number=1, figure_name=figure_name)\n o.background = fig.canvas.copy_from_bbox(ax.bbox)\n cls.instances[figure_number] = o\n\n else: \n # Get figure\n fig = plt.figure(figure_number)\n ax = fig.axes[0]\n \n # Detect when figure is closed. 
then delete everything basically\n cls.cid_closed_fig = fig.canvas.mpl_connect('close_event', cls.on_shutdown)\n\n # Add artist if not yet\n if artist_name not in cls.instances[figure_number].artists:\n if not args:\n if kwargs:\n cls.instances[figure_number]._add_artists(ax.plot(x, y, linestyle=linestyle,**kwargs), artist_name)\n else:\n cls.instances[figure_number]._add_artists(ax.plot(x, y, linestyle=linestyle), artist_name)\n else:\n if kwargs:\n cls.instances[figure_number]._add_artists(ax.plot(x, y, args[0], linestyle=linestyle, **kwargs), artist_name)\n else:\n cls.instances[figure_number]._add_artists(ax.plot(x, y, args[0], linestyle=linestyle), artist_name)\n\n # store data\n if not draw_clean:\n if isinstance(x, float) or isinstance(x, int) or \"int64\" in str(type(x)):\n cls.instances[figure_number].artists[artist_name]['xdata'].append(x)\n cls.instances[figure_number].artists[artist_name]['ydata'].append(y)\n else:\n cls.instances[figure_number].artists[artist_name]['xdata'].extend(x)\n cls.instances[figure_number].artists[artist_name]['ydata'].extend(y)\n else:\n cls.instances[figure_number].artists[artist_name]['xdata'] = x\n cls.instances[figure_number].artists[artist_name]['ydata'] = y\n\n line = cls.instances[figure_number].artists[artist_name]['artist'][0]\n # Set line2d data\n line.set_xdata(cls.instances[figure_number].artists[artist_name]['xdata'])\n line.set_ydata(cls.instances[figure_number].artists[artist_name]['ydata'])\n line.set_alpha(alpha)\n\n @classmethod\n def add_artist_ex(cls, artist, artist_name, figure_number=1):\n \"\"\"Add any user defined artist \"\"\"\n if artist_name not in cls.instances[figure_number].artists:\n artist.set_animated(True)\n cls.instances[figure_number]._add_artists(artist, artist_name, use_line=False)\n\n @classmethod\n def update(cls, figure_number=1):\n # Get figure\n fig = plt.figure(figure_number)\n ax = fig.axes[0]\n\n if cls.instances[figure_number].background is None:\n cls.instances[figure_number]._on_draw(None)\n else:\n # restore background \n fig.canvas.restore_region(cls.instances[figure_number].background)\n # #Respect z order\n # sorted_artist = sorted(cls.instances[figure_number].artists.values(), key=lambda x: x['artist'][0].get_zorder())\n # # for a in cls.instances[figure_number].artists.values():\n # for a in sorted_artist:\n # # Draw artists\n # fig.draw_artist(a['artist'][0])\n \n cls.instances[figure_number]._draw_animated()\n # blit the axes\n fig.canvas.blit(fig.bbox)\n # fig.canvas.update()\n # flush events\n fig.canvas.flush_events()\n # fig.canvas.flush_events()\n # pause if necessary\n if cfg.Animation.animate_delay > 0:\n time.sleep(cfg.Animation.animate_delay)\n\n \n @classmethod\n def on_shutdown(cls, event):\n # When figure is closed, clear out all figure instances\n cls.instances = {}\n\n @classmethod\n def close(cls):\n plt.close()\n\n @classmethod\n def prevent_clipping(cls):\n ### prevent edge clipping of markers. Save ticks, but change limits\n xticks, xticklabels = plt.xticks()\n yticks, yticklabels = plt.yticks()\n\n # shaft half a step to the left\n xmin = (3*xticks[0] - xticks[1])/2.\n # shaft half a step to the right\n xmax = (3*xticks[-1] - xticks[-2])/2.\n\n # shaft half a step below\n ymin = (3*yticks[0] - yticks[1])/2.\n # shaft half a step above\n ymax = (3*yticks[-1] - yticks[-2])/2.\n\n plt.xlim(xmin, xmax)\n plt.xticks(xticks)\n\n plt.ylim(ymin, ymax)\n plt.yticks(yticks)\n\n\n\n\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.fignum_exists",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.close",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.pause",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
malywonsz/txtai | [
"ace1b04161062430887eb2153961abcd819a5afb"
] | [
"src/python/txtai/workflow/task/base.py"
] | [
"\"\"\"\nTask module\n\"\"\"\n\nimport re\n\nimport numpy as np\nimport torch\n\n\nclass Task:\n \"\"\"\n Base class for all workflow tasks.\n \"\"\"\n\n def __init__(\n self,\n action=None,\n select=None,\n unpack=True,\n column=None,\n merge=\"hstack\",\n initialize=None,\n finalize=None,\n concurrency=None,\n onetomany=True,\n **kwargs,\n ):\n \"\"\"\n Creates a new task. A task defines two methods, type of data it accepts and the action to execute\n for each data element. Action is a callable function or list of callable functions.\n\n Args:\n action: action(s) to execute on each data element\n select: filter(s) used to select data to process\n unpack: if data elements should be unpacked or unwrapped from (id, data, tag) tuples\n column: column index to select if element is a tuple, defaults to all\n merge: merge mode for joining multi-action outputs, defaults to hstack\n initialize: action to execute before processing\n finalize: action to execute after processing\n concurrency: sets concurrency method when execute instance available\n valid values: \"thread\" for thread-based concurrency, \"process\" for process-based concurrency\n onetomany: if one-to-many data transformations should be enabled, defaults to True\n kwargs: additional keyword arguments\n \"\"\"\n\n # Standardize into list of actions\n if not action:\n action = []\n elif not isinstance(action, list):\n action = [action]\n\n self.action = action\n self.select = select\n self.unpack = unpack\n self.column = column\n self.merge = merge\n self.initialize = initialize\n self.finalize = finalize\n self.concurrency = concurrency\n self.onetomany = onetomany\n\n # Check for custom registration. Adds additional instance members and validates required dependencies available.\n if hasattr(self, \"register\"):\n self.register(**kwargs)\n elif kwargs:\n # Raise error if additional keyword arguments passed in without register method\n kwargs = \", \".join(f\"'{kw}'\" for kw in kwargs)\n raise TypeError(f\"__init__() got unexpected keyword arguments: {kwargs}\")\n\n def __call__(self, elements, executor=None):\n \"\"\"\n Executes action for a list of data elements.\n\n Args:\n elements: iterable data elements\n executor: execute instance, enables concurrent task actions\n\n Returns:\n transformed data elements\n \"\"\"\n\n if isinstance(elements, list):\n return self.filteredrun(elements, executor)\n\n return self.run(elements, executor)\n\n def filteredrun(self, elements, executor):\n \"\"\"\n Executes a filtered run, which will tag all inputs with a process id, filter elements down to elements the\n task can handle and execute on that subset. 
Items not selected for processing will be returned unmodified.\n\n Args:\n elements: iterable data elements\n executor: execute instance, enables concurrent task actions\n\n Returns:\n transformed data elements\n \"\"\"\n\n # Build list of elements with unique process ids\n indexed = list(enumerate(elements))\n\n # Filter data down to data this task handles\n data = [(x, self.upack(element)) for x, element in indexed if self.accept(self.upack(element, True))]\n\n # Get list of filtered process ids\n ids = [x for x, _ in data]\n\n # Prepare elements and execute task action(s)\n results = self.execute([self.prepare(element) for _, element in data], executor)\n\n # Pack results back into elements\n if self.merge:\n elements = self.filteredpack(results, indexed, ids)\n else:\n elements = [self.filteredpack(r, indexed, ids) for r in results]\n\n return elements\n\n def filteredpack(self, results, indexed, ids):\n \"\"\"\n Processes and packs results back into original input elements.\n\n Args:\n results: task results\n indexed: original elements indexed by process id\n ids: process ids accepted by this task\n\n Returns:\n packed elements\n \"\"\"\n\n # Update with transformed elements. Handle one to many transformations.\n elements = []\n for x, element in indexed:\n if x in ids:\n # Get result for process id\n result = results[ids.index(x)]\n\n if isinstance(result, OneToMany):\n # One to many transformations\n elements.extend([self.pack(element, r) for r in result])\n else:\n # One to one transformations\n elements.append(self.pack(element, result))\n else:\n # Pass unprocessed elements through\n elements.append(element)\n\n return elements\n\n def run(self, elements, executor):\n \"\"\"\n Executes a task run for elements. A standard run processes all elements.\n\n Args:\n elements: iterable data elements\n executor: execute instance, enables concurrent task actions\n\n Returns:\n transformed data elements\n \"\"\"\n\n # Execute task actions\n results = self.execute(elements, executor)\n\n # Handle one to many transformations\n if isinstance(results, list):\n elements = []\n for result in results:\n if isinstance(result, OneToMany):\n # One to many transformations\n elements.extend(result)\n else:\n # One to one transformations\n elements.append(result)\n\n return elements\n\n return results\n\n def accept(self, element):\n \"\"\"\n Determines if this task can handle the input data format.\n\n Args:\n element: input data element\n\n Returns:\n True if this task can process this data element, False otherwise\n \"\"\"\n\n return (isinstance(element, str) and re.search(self.select, element.lower())) if element is not None and self.select else True\n\n def upack(self, element, force=False):\n \"\"\"\n Unpacks data for processing.\n\n Args:\n element: input data element\n force: if True, data is unpacked even if task has unpack set to False\n\n Returns:\n data\n \"\"\"\n\n # Extract data from (id, data, tag) formatted elements\n if (self.unpack or force) and isinstance(element, tuple):\n return element[1]\n\n return element\n\n def pack(self, element, data):\n \"\"\"\n Packs data after processing.\n\n Args:\n element: transformed data element\n data: item to pack element into\n\n Returns:\n packed data\n \"\"\"\n\n # Pack data into (id, data, tag) formatted elements\n if self.unpack and isinstance(element, tuple):\n # If new data is a (id, data, tag) tuple use that except for multi-action \"hstack\" merges which produce tuples\n if isinstance(data, tuple) and (len(self.action) <= 1 or self.merge 
!= \"hstack\"):\n return data\n\n # Create a copy of tuple, update data element and return\n element = list(element)\n element[1] = data\n return tuple(element)\n\n return data\n\n def prepare(self, element):\n \"\"\"\n Method that allows downstream tasks to prepare data element for processing.\n\n Args:\n element: input data element\n\n Returns:\n data element ready for processing\n \"\"\"\n\n return element\n\n def execute(self, elements, executor):\n \"\"\"\n Executes action(s) on elements.\n\n Args:\n elements: list of data elements\n executor: execute instance, enables concurrent task actions\n\n Returns:\n transformed data elements\n \"\"\"\n\n if self.action:\n # Run actions\n outputs = []\n for x, action in enumerate(self.action):\n # Filter elements by column index if necessary - supports a single int or an action index to column index mapping\n index = self.column[x] if isinstance(self.column, dict) else self.column\n inputs = [self.extract(e, index) for e in elements] if index is not None else elements\n\n # Queue arguments for executor, process immediately if no executor available\n outputs.append((action, inputs) if executor else self.process(action, inputs))\n\n # Run with executor if available\n if executor:\n outputs = executor.run(self.concurrency, self.process, outputs)\n\n # Run post process operations\n return self.postprocess(outputs)\n\n return elements\n\n def extract(self, element, index):\n \"\"\"\n Extracts a column from element by index if the element is a tuple.\n\n Args:\n element: input element\n index: column index\n\n Returns:\n extracted column\n \"\"\"\n\n if isinstance(element, tuple):\n if not self.unpack and len(element) == 3 and isinstance(element[1], tuple):\n return (element[0], element[1][index], element[2])\n\n return element[index]\n\n return element\n\n def process(self, action, inputs):\n \"\"\"\n Executes action using inputs as arguments.\n\n Args:\n action: callable object\n inputs: action inputs\n\n Returns:\n action outputs\n \"\"\"\n\n # Execute action and get outputs\n return action(inputs)\n\n def postprocess(self, outputs):\n \"\"\"\n Runs post process routines after a task action.\n\n Args:\n outputs: task outputs\n\n Returns:\n postprocessed outputs\n \"\"\"\n\n # Unpack single action tasks\n if len(self.action) == 1:\n return self.single(outputs[0])\n\n # Return unmodified outputs when merge set to None\n if not self.merge:\n return outputs\n\n if self.merge == \"vstack\":\n return self.vstack(outputs)\n if self.merge == \"concat\":\n return self.concat(outputs)\n\n # Default mode is hstack\n return self.hstack(outputs)\n\n def single(self, outputs):\n \"\"\"\n Post processes and returns single action outputs.\n\n Args:\n outputs: outputs from a single task\n\n Returns:\n post processed outputs\n \"\"\"\n\n if self.onetomany and isinstance(outputs, list):\n # Wrap one to many transformations\n outputs = [OneToMany(output) if isinstance(output, list) else output for output in outputs]\n\n return outputs\n\n def vstack(self, outputs):\n \"\"\"\n Merges outputs row-wise. 
Returns a list of lists which will be interpreted as a one to many transformation.\n\n Row-wise merge example (2 actions)\n\n Inputs: [a, b, c]\n\n Outputs => [[a1, b1, c1], [a2, b2, c2]]\n\n Row Merge => [[a1, a2], [b1, b2], [c1, c2]] = [a1, a2, b1, b2, c1, c2]\n\n Args:\n outputs: task outputs\n\n Returns:\n list of aggregated/zipped outputs as one to many transforms (row-wise)\n \"\"\"\n\n # If all outputs are numpy arrays, use native method\n if all(isinstance(output, np.ndarray) for output in outputs):\n return np.concatenate(np.stack(outputs, axis=1))\n\n # If all outputs are torch tensors, use native method\n # pylint: disable=E1101\n if all(torch.is_tensor(output) for output in outputs):\n return torch.cat(tuple(torch.stack(outputs, axis=1)))\n\n # Flatten into lists of outputs per input row. Wrap as one to many transformation.\n merge = []\n for x in zip(*outputs):\n combine = []\n for y in x:\n if isinstance(y, list):\n combine.extend(y)\n else:\n combine.append(y)\n\n merge.append(OneToMany(combine))\n\n return merge\n\n def hstack(self, outputs):\n \"\"\"\n Merges outputs column-wise. Returns a list of tuples which will be interpreted as a one to one transformation.\n\n Column-wise merge example (2 actions)\n\n Inputs: [a, b, c]\n\n Outputs => [[a1, b1, c1], [a2, b2, c2]]\n\n Column Merge => [(a1, a2), (b1, b2), (c1, c2)]\n\n Args:\n outputs: task outputs\n\n Returns:\n list of aggregated/zipped outputs as tuples (column-wise)\n \"\"\"\n\n # If all outputs are numpy arrays, use native method\n if all(isinstance(output, np.ndarray) for output in outputs):\n return np.stack(outputs, axis=1)\n\n # If all outputs are torch tensors, use native method\n # pylint: disable=E1101\n if all(torch.is_tensor(output) for output in outputs):\n return torch.stack(outputs, axis=1)\n\n return list(zip(*outputs))\n\n def concat(self, outputs):\n \"\"\"\n Merges outputs column-wise and concats values together into a string. Returns a list of strings.\n\n Concat merge example (2 actions)\n\n Inputs: [a, b, c]\n\n Outputs => [[a1, b1, c1], [a2, b2, c2]]\n\n Concat Merge => [(a1, a2), (b1, b2), (c1, c2)] => [\"a1. a2\", \"b1. b2\", \"c1. c2\"]\n\n Args:\n outputs: task outputs\n\n Returns:\n list of concat outputs\n \"\"\"\n\n return [\". \".join([str(y) for y in x if y]) for x in self.hstack(outputs)]\n\n\nclass OneToMany:\n \"\"\"\n Encapsulates list output for a one to many transformation.\n \"\"\"\n\n def __init__(self, values):\n \"\"\"\n Creates a new OneToMany transformation.\n\n Args:\n values: list of outputs\n \"\"\"\n\n self.values = values\n\n def __iter__(self):\n return self.values.__iter__()\n"
] | [
[
"torch.stack",
"torch.is_tensor",
"numpy.stack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hyesungKomet/deep-learning-from-scratch-2 | [
"4b60f0b9c9850d0ff987446dbdec18bbdfb5109c"
] | [
"dataset/ptb.py"
] | [
"# coding: utf-8\nimport sys\nimport os\nsys.path.append('..')\ntry:\n import urllib.request\nexcept ImportError:\n raise ImportError('Use Python3!')\nimport pickle\nimport numpy as np\n\n\nurl_base = 'https://raw.githubusercontent.com/tomsercu/lstm/master/data/'\nkey_file = {\n 'train':'ptb.train.txt',\n 'test':'ptb.test.txt',\n 'valid':'ptb.valid.txt'\n}\nsave_file = {\n 'train':'ptb.train.npy',\n 'test':'ptb.test.npy',\n 'valid':'ptb.valid.npy'\n}\nvocab_file = 'ptb.vocab.pkl'\n\ndataset_dir = os.path.dirname(os.path.abspath(__file__))\n\n\ndef _download(file_name):\n file_path = dataset_dir + '/' + file_name\n if os.path.exists(file_path):\n return\n\n print('Downloading ' + file_name + ' ... ')\n\n try:\n urllib.request.urlretrieve(url_base + file_name, file_path)\n except urllib.error.URLError:\n import ssl\n ssl._create_default_https_context = ssl._create_unverified_context\n urllib.request.urlretrieve(url_base + file_name, file_path)\n\n print('Done')\n\n\ndef load_vocab():\n vocab_path = dataset_dir + '/' + vocab_file\n\n if os.path.exists(vocab_path):\n with open(vocab_path, 'rb') as f:\n word_to_id, id_to_word = pickle.load(f)\n return word_to_id, id_to_word\n\n word_to_id = {}\n id_to_word = {}\n data_type = 'train'\n file_name = key_file[data_type]\n file_path = dataset_dir + '/' + file_name\n\n _download(file_name)\n\n words = open(file_path).read().replace('\\n', '<eos>').strip().split()\n\n for i, word in enumerate(words):\n if word not in word_to_id:\n tmp_id = len(word_to_id)\n word_to_id[word] = tmp_id\n id_to_word[tmp_id] = word\n\n with open(vocab_path, 'wb') as f:\n pickle.dump((word_to_id, id_to_word), f)\n\n return word_to_id, id_to_word\n\n\ndef load_data(data_type='train'):\n '''\n :param data_type: 데이터 유형: 'train' or 'test' or 'valid (val)'\n :return:\n '''\n if data_type == 'val': data_type = 'valid'\n save_path = dataset_dir + '/' + save_file[data_type]\n\n word_to_id, id_to_word = load_vocab()\n\n if os.path.exists(save_path):\n corpus = np.load(save_path)\n return corpus, word_to_id, id_to_word\n\n file_name = key_file[data_type]\n file_path = dataset_dir + '/' + file_name\n _download(file_name)\n\n words = open(file_path).read().replace('\\n', '<eos>').strip().split()\n corpus = np.array([word_to_id[w] for w in words])\n\n np.save(save_path, corpus)\n return corpus, word_to_id, id_to_word\n\n\nif __name__ == '__main__':\n for data_type in ('train', 'val', 'test'):\n load_data(data_type)\n"
] | [
[
"numpy.load",
"numpy.array",
"numpy.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
manimoh/python-neo | [
"fa7b025cb6a3be27d4f1a48bd5f26cb89d8af9c3"
] | [
"neo/test/iotest/test_axographio.py"
] | [
"\"\"\"\nTests of neo.io.axographio\n\"\"\"\n\nfrom datetime import datetime\nimport unittest\n\nfrom neo.io import AxographIO\nfrom neo.test.iotest.common_io_test import BaseTestIO\n\nimport numpy as np\nfrom numpy.testing import assert_equal\nimport quantities as pq\n\n\nclass TestAxographIO(BaseTestIO, unittest.TestCase):\n files_to_test = [\n 'AxoGraph_Graph_File', # version 1 file, provided with AxoGraph\n 'AxoGraph_Digitized_File', # version 2 file, provided with AxoGraph\n 'AxoGraph_X_File.axgx', # version 5 file, provided with AxoGraph\n 'File_axograph.axgd', # version 6 file\n 'episodic.axgd',\n 'events_and_epochs.axgx',\n 'written-by-axographio-with-linearsequence.axgx',\n 'written-by-axographio-without-linearsequence.axgx',\n 'corrupt-comment.axgx',\n ]\n files_to_download = files_to_test\n ioclass = AxographIO\n\n def test_version_1(self):\n \"\"\"Test reading a version 1 AxoGraph file\"\"\"\n\n filename = self.get_filename_path('AxoGraph_Graph_File')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.annotations['format_ver'], 1)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['Current', 'Current'])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('pA')\n target = np.array([[-5.5078130],\n [-3.1171880],\n [+1.6640626],\n [+1.6640626],\n [+4.0546880]], dtype=np.float32)\n assert_equal(arr, target)\n\n assert_equal(sig.t_start, 0.0005000000237487257 * pq.s)\n\n assert_equal(sig.sampling_period, 0.0005000010132789612 * pq.s)\n\n def test_version_2(self):\n \"\"\"Test reading a version 2 AxoGraph file\"\"\"\n\n filename = self.get_filename_path('AxoGraph_Digitized_File')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.annotations['format_ver'], 2)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['Current', 'Voltage', 'Column4', 'Column5',\n 'Column6', 'Column7', 'Column8', 'Column9',\n 'Column10', 'Column11', 'Column12', 'Column13',\n 'Column14', 'Column15', 'Column16', 'Column17',\n 'Column18', 'Column19', 'Column20', 'Column21',\n 'Column22', 'Column23', 'Column24', 'Column25',\n 'Column26', 'Column27', 'Column28', 'Column29'])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('pA')\n target = np.array([[0.3125],\n [9.6875],\n [9.6875],\n [9.6875],\n [9.3750]], dtype=np.float32)\n assert_equal(arr, target)\n\n assert_equal(sig.t_start, 0.00009999999747378752 * pq.s)\n\n assert_equal(sig.sampling_period, 0.00009999999747378750 * pq.s)\n\n def test_version_5(self):\n \"\"\"Test reading a version 5 AxoGraph file\"\"\"\n\n filename = self.get_filename_path('AxoGraph_X_File.axgx')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.annotations['format_ver'], 5)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['Current', '', '', '', '', ''])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('pA')\n target = np.array([[+3.0846775],\n [-2.5403225],\n [-1.2903225],\n [+6.8346770],\n [-5.0403230]], dtype=np.float32)\n assert_equal(arr, target)\n\n assert_equal(sig.t_start, 0.00005 * pq.s)\n\n assert_equal(sig.sampling_period, 0.00005 * pq.s)\n\n def test_version_6(self):\n \"\"\"Test reading a version 6 AxoGraph file\"\"\"\n\n filename = self.get_filename_path('File_axograph.axgd')\n reader = AxographIO(filename=filename)\n 
blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.annotations['format_ver'], 6)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['Membrane Voltage-1'])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('mV')\n target = np.array([[-60.731834],\n [-60.701313],\n [-60.670795],\n [-60.701313],\n [-60.731834]], dtype=np.float32)\n assert_equal(arr, target)\n\n assert_equal(sig.t_start, 0.00002 * pq.s)\n\n assert_equal(sig.sampling_period, 0.00002 * pq.s)\n\n def test_file_written_by_axographio_package_with_linearsequence(self):\n \"\"\"Test reading file written by axographio package with linearsequence time column\"\"\"\n\n filename = self.get_filename_path('written-by-axographio-with-linearsequence.axgx')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.annotations['format_ver'], 6)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['Data 1', 'Data 2'])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('mV')\n target = np.array([[0.000000],\n [9.999833],\n [19.998667],\n [29.995500],\n [39.989334]], dtype=np.float32)\n assert_equal(arr, target)\n\n assert_equal(sig.t_start, 0 * pq.s)\n\n assert_equal(sig.sampling_period, 0.01 * pq.s)\n\n def test_file_written_by_axographio_package_without_linearsequence(self):\n \"\"\"Test reading file written by axographio package without linearsequence time column\"\"\"\n\n filename = self.get_filename_path('written-by-axographio-without-linearsequence.axgx')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.annotations['format_ver'], 6)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['Data 1', 'Data 2'])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('mV')\n target = np.array([[0.000000],\n [9.999833],\n [19.998667],\n [29.995500],\n [39.989334]], dtype=np.float32)\n assert_equal(arr, target)\n\n assert_equal(sig.t_start, 0 * pq.s)\n\n assert_equal(sig.sampling_period, 0.009999999999999787 * pq.s)\n\n def test_file_with_corrupt_comment(self):\n \"\"\"Test reading a file with a corrupt comment\"\"\"\n\n filename = self.get_filename_path('corrupt-comment.axgx')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.annotations['format_ver'], 6)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['Data 1', 'Data 2'])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('mV')\n target = np.array([[0.000000],\n [9.999833],\n [19.998667],\n [29.995500],\n [39.989334]], dtype=np.float32)\n assert_equal(arr, target)\n\n assert_equal(sig.t_start, 0 * pq.s)\n\n assert_equal(sig.sampling_period, 0.01 * pq.s)\n\n def test_multi_segment(self):\n \"\"\"Test reading an episodic file into multiple Segments\"\"\"\n\n filename = self.get_filename_path('episodic.axgd')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n\n assert_equal(len(blk.segments), 30)\n assert_equal(len(blk.groups), 2)\n assert_equal(len(blk.segments[0].analogsignals), 2)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['CAP', 'STIM'])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('V')\n target = np.array([[1.37500e-06],\n [1.53125e-06],\n [1.34375e-06],\n 
[1.09375e-06],\n [1.21875e-06]], dtype=np.float32)\n assert_equal(arr, target)\n\n def test_force_single_segment(self):\n \"\"\"Test reading an episodic file into one Segment\"\"\"\n\n filename = self.get_filename_path('episodic.axgd')\n reader = AxographIO(filename=filename, force_single_segment=True)\n blk = reader.read_block(signal_group_mode='split-all')\n\n assert_equal(len(blk.segments), 1)\n assert_equal(len(blk.groups), 60)\n assert_equal(len(blk.segments[0].analogsignals), 60)\n\n names = [sig.name for sig in blk.segments[0].analogsignals]\n assert_equal(names, ['CAP', 'STIM'] * 30)\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('V')\n target = np.array([[1.37500e-06],\n [1.53125e-06],\n [1.34375e-06],\n [1.09375e-06],\n [1.21875e-06]], dtype=np.float32)\n assert_equal(arr, target)\n\n def test_group_by_same_units(self):\n \"\"\"Test reading with group-by-same-units\"\"\"\n\n filename = self.get_filename_path('episodic.axgd')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='group-by-same-units')\n\n assert_equal(len(blk.segments), 30)\n assert_equal(len(blk.groups), 1)\n assert_equal(len(blk.segments[0].analogsignals), 1)\n\n chan_names = blk.segments[0].analogsignals[0].array_annotations['channel_names']\n assert_equal(chan_names, ['CAP', 'STIM'])\n\n sig = blk.segments[0].analogsignals[0][:5]\n arr = sig.as_array('V')\n target = np.array([[1.37500e-06, 3.43750e-03],\n [1.53125e-06, 2.81250e-03],\n [1.34375e-06, 1.87500e-03],\n [1.09375e-06, 1.56250e-03],\n [1.21875e-06, 1.56250e-03]], dtype=np.float32)\n assert_equal(arr, target)\n\n def test_events_and_epochs(self):\n \"\"\"Test loading events and epochs\"\"\"\n\n filename = self.get_filename_path('events_and_epochs.axgx')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n\n event = blk.segments[0].events[0]\n assert_equal(event.times, [5999, 5999, 23499, 23499,\n 26499, 26499, 35999]\n * blk.segments[0].analogsignals[0].sampling_period)\n assert_equal(event.labels, ['Stop', 'Start', 'Stop', 'Start',\n 'Stop', 'Start', 'Stop'])\n\n epoch = blk.segments[0].epochs[0]\n assert_equal(epoch.times, np.array([0.1, 4]) * pq.s)\n assert_equal(epoch.durations, np.array([1.4, 2]) * pq.s)\n assert_equal(epoch.labels, ['test interval 1', 'test interval 2'])\n\n def test_rec_datetime(self):\n \"\"\"Test parsing the recording datetime from notes\"\"\"\n\n # parsing of rec_datetime differs depending on acquisition mode\n\n # file obtained in episodic acquisition mode has date and time on\n # separate lines of notes\n filename = self.get_filename_path('episodic.axgd')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.rec_datetime, datetime(2018, 6, 7, 15, 11, 36))\n\n # file obtained in continuous acquisition mode has date and time in\n # single line of notes\n filename = self.get_filename_path('events_and_epochs.axgx')\n reader = AxographIO(filename=filename)\n blk = reader.read_block(signal_group_mode='split-all')\n assert_equal(blk.rec_datetime, datetime(2019, 5, 25, 20, 16, 25))\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] | [
[
"numpy.testing.assert_equal",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HypoChloremic/annuc | [
"4d13ce557b16b8d643e2edb39bbf09b97c123f6b"
] | [
"run.py"
] | [
"# Running annuc\r\n# (c) 2017 Ali Rassolie\r\n# Important to note that this has to be run in the command line environment\r\n# bokeh serve --show first_plot.py\r\n\r\n\r\nfrom collections import OrderedDict\r\nfrom bokeh.charts import Bar, output_file, show\r\nfrom bokeh.io import curdoc\r\n\r\nimport matplotlib.pyplot as plt\r\nimport pandas as pd\r\nimport numpy as np \r\nimport annuc\r\n\r\nclass constants:\r\n\tcounter_files = list()\r\n\r\ndef start_annuc(searchtype=None, amount=2, header=None, taildata=None, tailsample=None):\r\n\t\r\n\tfor pos_of_file in range(amount):\r\n\t\toutput = \"{}_{}_results.vcf\".format(header, pos_of_file)\r\n\t\thass_input = \"{}_{}_{}\".format(header, pos_of_file, taildata)\r\n\t\tspec_input = \"{}_{}_{}\".format(header, pos_of_file, tailsample)\r\n\t\toutput_counter = \"{}_{}_counter.vcf\".format(header, pos_of_file)\r\n\t\tprint(hass_input, spec_input)\r\n\t\tconstants.counter_files.append(output_counter)\r\n\r\n\t\ta = annuc.annuc(searchtype=searchtype, output=output, dictinput=spec_input, hass=hass_input)\r\n\t\ta.filter(slicesize=1)\r\n\t\ta.prod_counter(infile=output, outcountfile=output_counter)\r\n\r\n\r\n\r\ndef histo_matplot(infile=None):\r\n\r\n\twith open(infile, \"r\") as file:\r\n\t\tinfo = [ line.replace(\"\\n\", \"\") for line in file ]\r\n\t\tcombo = [ combo.split(\"\\t\")[0] for combo in info ]\r\n\t\tamount = [ float(appearances.split(\"\\t\")[1]) for appearances in info ]\r\n\tdata_as_dict = { combo[pos]: amount[pos] for pos in range(len(combo)) }\r\n\tbase = range(len(combo))\r\n\trects = plt.bar(base, amount, align=\"center\", width=0.5)\r\n\tplt.xticks(base, combo)\r\n\tplt.ylabel(\"Number of appearances\")\r\n\tplt.xlabel(\"Ref-Alt combination\")\r\n\tplt.title(\"{}\".format(infile))\r\n\tfor rect in rects:\r\n\t\theight = rect.get_height()\r\n\t\tplt.text(rect.get_x() + rect.get_width()/2., 1.05*height,'%d' % int(height),ha='center', va='bottom')\r\n\tplt.show()\r\n\r\ndef histo_bokeh(infile=None):\r\n\twith open(infile, \"r\") as file:\r\n\t\tinfo = [ line.replace(\"\\n\", \"\") for line in file ]\r\n\t\r\n\tcombo = [ combo.split(\"\\t\")[0] for combo in info ]\r\n\tamount = [ int(appearances.split(\"\\t\")[1]) for appearances in info ]\r\n\r\n\tdata_as_dict = { combo[pos]: amount[pos] for pos in range(len(combo)) }\r\n\t\r\n\r\n\tordered_data_as_dict = OrderedDict(data_as_dict)\r\n\tordered_data_as_dict = pd.Series(ordered_data_as_dict, index=ordered_data_as_dict.keys())\r\n\tbar = Bar(ordered_data_as_dict, title=\"Stacked bars\")\r\n\toutput_file(\"stacked_bar.html\")\r\n\tcurdoc().add_root(bar)\r\n\tshow(bar)\r\n\r\nhisto_bokeh(infile=\"malbac_0_counter.vcf\")\r\n\r\n\r\n# histo_bokeh()\r\n# Using this with bokeh, is not wanted.. took me hours to figure debug this. \r\n# Nothing was provided which made this clear\r\n# if __name__ == '__main__':"
] | [
[
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NTU-ROSE/fast-reid | [
"f4551a128ba17ef201301ccf3c986edae014cabd"
] | [
"projects/PartialReID/partialreid/dsr_distance.py"
] | [
"\"\"\"Numpy version of euclidean distance, etc.\nNotice the input/output shape of methods, so that you can better understand\nthe meaning of these methods.\"\"\"\n\nimport numpy as np\nimport torch\n\n\ndef normalize(nparray, order=2, axis=0):\n \"\"\"Normalize a N-D numpy array along the specified axis.\"\"\"\n norm = np.linalg.norm(nparray, ord=order, axis=axis, keepdims=True)\n return nparray / (norm + np.finfo(np.float32).eps)\n\n\ndef compute_dsr_dist(array1, array2, distmat, scores):\n \"\"\" Compute the sptial feature reconstruction of all pairs\n array: [M, N, C] M: the number of query, N: the number of spatial feature, C: the dimension of each spatial feature\n array2: [M, N, C] M: the number of gallery\n :return:\n numpy array with shape [m1, m2]\n \"\"\"\n dist = 100 * torch.ones(len(array1), len(array2))\n dist = dist.cuda()\n kappa = 0.001\n index = np.argsort(distmat, axis=1)\n T = kappa * torch.eye(110)\n T = T.cuda()\n M = []\n for i in range(0, len(array2)):\n g = array2[i]\n g = torch.FloatTensor(g)\n g = g.view(g.size(0), g.size(1))\n g = g.cuda()\n Proj_M1 = torch.matmul(torch.inverse(torch.matmul(g.t(), g) + T), g.t())\n Proj_M1 = Proj_M1.cpu().numpy()\n M.append(Proj_M1)\n for i in range(0, len(array1)):\n q = torch.FloatTensor(array1[i])\n q = q.view(q.size(0), q.size(1))\n q = q.cuda()\n for j in range(0, 100):\n g = array2[index[i, j]]\n g = torch.FloatTensor(g)\n g = g.view(g.size(0), g.size(1))\n g = g.cuda()\n Proj_M = torch.FloatTensor(M[index[i, j]])\n Proj_M = Proj_M.cuda()\n a = torch.matmul(g, torch.matmul(Proj_M, q)) - q\n dist[i, index[i, j]] = ((torch.pow(a, 2).sum(0).sqrt()) * scores[i].cuda()).sum()\n dist = dist.cpu()\n dist = dist.numpy()\n\n return dist\n"
] | [
[
"torch.eye",
"numpy.linalg.norm",
"numpy.finfo",
"torch.matmul",
"torch.FloatTensor",
"numpy.argsort",
"torch.pow"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lbfinkbeiner/spin1_beam_model | [
"bdb5af058ab0430602de6595a2f0cfdb5d26b41f"
] | [
"src/spin1_beam_model/jones_matrix_field.py"
] | [
"import h5py\nimport numba as nb\nimport numpy as np\nimport ssht_numba as sshtn\nfrom scipy.interpolate import Rbf, RectBivariateSpline, interp1d\n\n\nclass AntennaFarFieldResponse:\n \"\"\"\n A model of the farfield response of an antenna specified by the data product\n of a cst_processing.CSTDataProcessor object.\n\n Frequencies specified in units of MHz.\n \"\"\"\n\n def __init__(self, spin1_data_file_path):\n with h5py.File(spin1_data_file_path, \"r\") as h5f:\n self.pos1_Elm_samples = h5f[\"pos1_Elm\"].value\n self.neg1_Elm_samples = h5f[\"neg1_Elm\"].value\n self.freq_nodes = h5f[\"frequencies\"].value\n self.L_model = h5f[\"spatial_bandlimit\"].value\n self.zenith_theta = h5f[\"zenith_theta\"].value\n self.zenith_phi = h5f[\"zenith_phi\"].value\n\n self.dual_feed = False\n\n def derive_symmetric_rotated_feed(self, rotation_angle_sign=\"positive\"):\n \"\"\"\n Compute the spatial harmonics for an antenna feed identical to the one\n specified by the input data, but rotated by 90 degrees about the z-axis.\n\n The sign conventions are those of McEwen and Wiaux 2011, Equation 2.\n\n For the cannonical CST output coordinates, in which:\n -the dipole feed is oriented along 'x'\n -the boresight is along 'z'\n -the 'theta' angle corrdinate is measured away from the 'z' axis,\n i.e. z(theta=0) = 1, and 0 <= theta <= pi\n -the 'phi' azimuthal angle coordinate is measured from 'x' to 'y',\n and 0 <= phi < 2*pi\n\n the rotated harmonics obtained with rotation_angle_sign='positive'\n correspond to an East-West oriented antenna feed, while the input\n harmonics correspond to a North-South oriented feed.\n \"\"\"\n if rotation_angle_sign == \"negative\":\n rot_angle = -np.pi / 2.0\n elif rotation_angle_sign == \"positive\":\n rot_angle = np.pi / 2.0\n else:\n raise ValueError(\n \"The parameter rotation_angle_sign must be 'negative' or 'positive'.\"\n )\n\n rotation_operator = np.zeros(self.L_model ** 2, dtype=np.complex128)\n for ii in range(rotation_operator.size):\n el, m = sshtn.ind2elm(ii)\n arg = m * rot_angle\n rotation_operator[ii] = np.cos(arg) - 1j * np.sin(arg)\n\n self.pos1_rElm_samples = rotation_operator[None, :] * self.pos1_Elm_samples\n self.neg1_rElm_samples = rotation_operator[None, :] * self.neg1_Elm_samples\n\n self.dual_feed = True\n\n def interpolate_spatial_harmonics_spectra(self, nu_axis, interp_method=\"sinc_rbf\"):\n \"\"\"\n Evaluates the spatial harmonics at the frequency points specified\n (in MHz) by nu_axis.\n\n interp_method:\n -'sinc_rbf' is a radial basis function interpolation using a\n sinc kernel with length scale set by the sampling rate of the input data.\n This effectively assumes that the input data is Nyquist sampled, and aims\n to avoid both 1) additional smoothing beyond what was done in the initial\n processing to derive the model data, or 2) extrapolating beyond what\n the input data supports.\n\n -'cubic_spline' uses a cubic spline.\n\n The fits are always over the full input frequency band, even if `nu_axis`\n is only covers a small subset of `freq_nodes`.\n \"\"\"\n self.nu_axis = nu_axis\n\n if interp_method == \"sinc_rbf\":\n\n # harmonic mean of input sample spacing\n # delta_nu_in = 1./np.mean(1./np.diff(self.freq_nodes))\n delta_nu_in = np.diff(self.freq_nodes)[0]\n print(\"delta_nu_in is\", delta_nu_in)\n\n def sinc_kernel(self, r):\n tau_c = 1.0 / (2 * self.epsilon)\n\n r = np.where(r == 0, 1e-20, r)\n y = 2 * np.pi * tau_c * r\n kernel = np.sin(y) / y\n return kernel\n\n def rbf_obj(data):\n rbf = Rbf(\n self.freq_nodes,\n data,\n 
function=sinc_kernel,\n epsilon=delta_nu_in,\n smooth=0.0,\n )\n return rbf\n\n self.pos1_Elm = np.zeros(\n (nu_axis.size, self.L_model ** 2), dtype=np.complex\n )\n self.neg1_Elm = np.zeros(\n (nu_axis.size, self.L_model ** 2), dtype=np.complex\n )\n\n for ii in range(self.L_model ** 2):\n re_pos1_Elm_rbf = rbf_obj(self.pos1_Elm_samples[:, ii].real)\n im_pos1_Elm_rbf = rbf_obj(self.pos1_Elm_samples[:, ii].imag)\n\n self.pos1_Elm[:, ii] = re_pos1_Elm_rbf(nu_axis) + 1j * im_pos1_Elm_rbf(\n nu_axis\n )\n\n re_neg1_Elm_rbf = rbf_obj(self.neg1_Elm_samples[:, ii].real)\n im_neg1_Elm_rbf = rbf_obj(self.neg1_Elm_samples[:, ii].imag)\n\n self.neg1_Elm[:, ii] = re_neg1_Elm_rbf(nu_axis) + 1j * im_neg1_Elm_rbf(\n nu_axis\n )\n\n elif interp_method == \"cubic_spline\":\n\n re_pos1_Elm_spl = interp1d(\n self.freq_nodes, self.pos1_Elm_samples.real, kind=\"cubic\", axis=0\n )\n im_pos1_Elm_spl = interp1d(\n self.freq_nodes, self.pos1_Elm_samples.imag, kind=\"cubic\", axis=0\n )\n\n self.pos1_Elm = re_pos1_Elm_spl(nu_axis) + 1j * im_pos1_Elm_spl(nu_axis)\n\n ###\n re_neg1_Elm_spl = interp1d(\n self.freq_nodes, self.neg1_Elm_samples.real, kind=\"cubic\", axis=0\n )\n im_neg1_Elm_spl = interp1d(\n self.freq_nodes, self.neg1_Elm_samples.imag, kind=\"cubic\", axis=0\n )\n\n self.neg1_Elm = re_neg1_Elm_spl(nu_axis) + 1j * im_neg1_Elm_spl(nu_axis)\n else:\n raise ValueError(\"interp_method must be 'sinc_rbf' or 'cubic_spline'\")\n\n zth = self.zenith_theta\n zph = self.zenith_phi\n delta = sshtn.generate_dl(np.pi / 2.0, self.L_model)\n\n zen_Eabs = np.zeros(nu_axis.size)\n for ii in range(nu_axis.size):\n zen_pos1_E = ssht_numba_series_eval(\n self.pos1_Elm[ii],\n 1,\n self.L_model,\n delta,\n np.array([zth]),\n np.array([zph]),\n )\n zen_neg1_E = ssht_numba_series_eval(\n self.neg1_Elm[ii],\n -1,\n self.L_model,\n delta,\n np.array([zth]),\n np.array([zph]),\n )\n\n zen_Et = (zen_pos1_E + zen_neg1_E) / np.sqrt(2.0)\n zen_Ep = (zen_pos1_E - zen_neg1_E) * (-1j) / np.sqrt(2.0)\n zen_Eabs[ii] = np.sqrt(np.abs(zen_Et) ** 2.0 + np.abs(zen_Ep) ** 2.0)\n\n self.pos1_Elm /= zen_Eabs[:, None]\n self.neg1_Elm /= zen_Eabs[:, None]\n\n if self.dual_feed:\n if interp_method == \"sinc_rbf\":\n\n self.pos1_rElm = np.zeros(\n (nu_axis.size, self.L_model ** 2), dtype=np.complex\n )\n self.neg1_rElm = np.zeros(\n (nu_axis.size, self.L_model ** 2), dtype=np.complex\n )\n\n for ii in range(self.L_model ** 2):\n re_pos1_rElm_rbf = rbf_obj(self.pos1_rElm_samples[:, ii].real)\n im_pos1_rElm_rbf = rbf_obj(self.pos1_rElm_samples[:, ii].imag)\n\n self.pos1_rElm[:, ii] = re_pos1_rElm_rbf(\n nu_axis\n ) + 1j * im_pos1_rElm_rbf(nu_axis)\n\n re_neg1_rElm_rbf = rbf_obj(self.neg1_rElm_samples[:, ii].real)\n im_neg1_rElm_rbf = rbf_obj(self.neg1_rElm_samples[:, ii].imag)\n\n self.neg1_rElm[:, ii] = re_neg1_rElm_rbf(\n nu_axis\n ) + 1j * im_neg1_rElm_rbf(nu_axis)\n\n elif interp_method == \"cubic_spline\":\n\n re_pos1_rElm_spl = interp1d(\n self.freq_nodes, self.pos1_rElm_samples.real, kind=\"cubic\", axis=0\n )\n im_pos1_rElm_spl = interp1d(\n self.freq_nodes, self.pos1_rElm_samples.imag, kind=\"cubic\", axis=0\n )\n\n self.pos1_rElm = re_pos1_rElm_spl(nu_axis) + 1j * im_pos1_rElm_spl(\n nu_axis\n )\n\n ###\n re_neg1_rElm_spl = interp1d(\n self.freq_nodes, self.neg1_rElm_samples.real, kind=\"cubic\", axis=0\n )\n im_neg1_rElm_spl = interp1d(\n self.freq_nodes, self.neg1_rElm_samples.imag, kind=\"cubic\", axis=0\n )\n\n self.neg1_rElm = re_neg1_rElm_spl(nu_axis) + 1j * im_neg1_rElm_spl(\n nu_axis\n )\n\n else:\n raise ValueError(\"interp_method 
must be 'sinc_rbf' or 'cubic_spline'\")\n\n zen_rEabs = np.zeros(nu_axis.size)\n for ii in range(nu_axis.size):\n zen_pos1_rE = ssht_numba_series_eval(\n self.pos1_rElm[ii],\n 1,\n self.L_model,\n delta,\n np.array([zth]),\n np.array([zph]),\n )\n zen_neg1_rE = ssht_numba_series_eval(\n self.neg1_rElm[ii],\n -1,\n self.L_model,\n delta,\n np.array([zth]),\n np.array([zph]),\n )\n\n zen_rEt = (zen_pos1_rE + zen_neg1_rE) / np.sqrt(2.0)\n zen_rEp = (zen_pos1_rE - zen_neg1_rE) * (-1j) / np.sqrt(2.0)\n zen_rEabs[ii] = np.sqrt(np.abs(zen_rEt) ** 2.0 + np.abs(zen_rEp) ** 2.0)\n\n self.pos1_rElm /= zen_rEabs[:, None]\n self.neg1_rElm /= zen_rEabs[:, None]\n\n def compute_spatial_spline_approximations(\n self, nu_axis, L_synth=\"model\", interp_method=\"sinc_rbf\"\n ):\n \"\"\"\n Compute a 2D cubic spline approximation of the elements of the components\n of the Jones matrix at each specified frequency.\n\n Parameters:\n nu_axis: the frequencies at which to compute the spatial 2D spline\n approximations.\n L_synth: an angular bandlimit that defines the spatial resolution of\n the data used to derive the spline function.\n\n \"\"\"\n if not self.dual_feed:\n raise ValueError(\n \"Only data for a single feed is set, \"\n \"this method requires data for dual feeds.\"\n )\n\n self.interpolate_spatial_harmonics_spectra(nu_axis, interp_method=interp_method)\n\n if L_synth == \"model\":\n L_synth = self.L_model\n if L_synth < self.L_model:\n # there nothing wrong in principle with L_synth < L_model, but\n # the code doesn't support it right now (see pad_Elm function def).\n raise ValueError(\n \"The synthesized bandlimit (L_synth) must be higher than the bandlimit \"\n \"of the model (L_synth > L_model).\"\n )\n\n theta_axis, phi_axis = sshtn.mwss_sample_positions(L_synth)\n\n mu_axis = np.cos(theta_axis)\n\n mu_axis_flip = np.flipud(mu_axis)\n phi_axis_pad = np.r_[phi_axis, np.array([2 * np.pi])]\n\n Nfreq = self.nu_axis.size\n\n def pad_Elm(Elm, L_padded):\n \"\"\"\n Pad an array of spatial harmonic modes with zeros, for use in fast\n synthesis of arbitrarily high resolution regularly gridded maps.\n \"\"\"\n L_in = int(np.sqrt(Elm.size))\n Elm_padded = np.zeros(L_padded ** 2, dtype=np.complex128)\n Elm_padded[: L_in ** 2] = Elm\n return Elm_padded\n\n def flipped_and_periodic_padded(E):\n E_flip = np.flip(E, axis=0)\n periodic_padding = E_flip[:, 0].reshape(-1, 1)\n E_fliped_and_padded = np.append(E_flip, periodic_padding, 1)\n\n return E_fliped_and_padded\n\n def spline_from_data(grid_data):\n grid_data_use = flipped_and_periodic_padded(grid_data)\n spl = RectBivariateSpline(\n mu_axis_flip,\n phi_axis_pad,\n grid_data_use,\n bbox=[-1.0, 1.0, 0.0, 2 * np.pi],\n kx=3,\n ky=3,\n s=0,\n )\n return spl\n\n def splines_from_harmonics(pos1_Elm, neg1_Elm, L_synth):\n pos1_Elm_pad = pad_Elm(pos1_Elm, L_synth)\n neg1_Elm_pad = pad_Elm(neg1_Elm, L_synth)\n\n pos1_E = np.empty(sshtn.mwss_sample_shape(L_synth), dtype=np.complex128)\n sshtn.mw_inverse_sov_sym_ss(pos1_Elm_pad, L_synth, 1, pos1_E)\n\n neg1_E = np.empty(sshtn.mwss_sample_shape(L_synth), dtype=np.complex128)\n sshtn.mw_inverse_sov_sym_ss(neg1_Elm_pad, L_synth, -1, neg1_E)\n\n Et = (pos1_E + neg1_E) / np.sqrt(2.0)\n Ep = (pos1_E - neg1_E) * (-1j) / np.sqrt(2.0)\n\n re_Et_spl, im_Et_spl = [spline_from_data(f(Et)) for f in [np.real, np.imag]]\n\n re_Ep_spl, im_Ep_spl = [spline_from_data(f(Ep)) for f in [np.real, np.imag]]\n\n return ((re_Et_spl, im_Et_spl), (re_Ep_spl, im_Ep_spl))\n\n self.E_spls = [\n splines_from_harmonics(p, n, L_synth)\n for (p, n) in 
zip(self.pos1_Elm, self.neg1_Elm)\n ]\n self.rE_spls = [\n splines_from_harmonics(p, n, L_synth)\n for (p, n) in zip(self.pos1_rElm, self.neg1_rElm)\n ]\n\n # knots for all the splines are the same\n self.xknots, self.yknots = self.E_spls[0][0][0].get_knots()\n\n # orders of the constructed splines\n self.kx = 3\n self.ky = 3\n\n # number of coefficients for each spline\n N_c = (self.E_spls[0][0][0].get_coeffs()).shape[0]\n\n self.E_spl_coeffs = np.zeros((Nfreq, 2, 2, N_c), dtype=np.float64)\n self.rE_spl_coeffs = np.zeros((Nfreq, 2, 2, N_c), dtype=np.float64)\n\n for ii in range(Nfreq):\n for aa in range(2):\n for bb in range(2):\n self.E_spl_coeffs[ii, aa, bb] = self.E_spls[ii][aa][bb].get_coeffs()\n self.rE_spl_coeffs[ii, aa, bb] = self.rE_spls[ii][aa][\n bb\n ].get_coeffs()\n\n def construct_component_functions(E_spl):\n def Et_func(theta, phi, grid=False):\n mu = np.cos(theta)\n return E_spl[0][0](mu, phi, grid=grid) + 1j * E_spl[0][1](\n mu, phi, grid=grid\n )\n\n def Ep_func(theta, phi, grid=False):\n mu = np.cos(theta)\n return E_spl[1][0](mu, phi, grid=grid) + 1j * E_spl[1][1](\n mu, phi, grid=grid\n )\n\n return Et_func, Ep_func\n\n def construct_directivity_function(Et_spl, Ep_spl):\n def D_func(theta, phi, grid=False):\n return (\n np.abs(Et_spl(theta, phi, grid=grid)) ** 2.0\n + np.abs(Ep_spl(theta, phi, grid=grid)) ** 2.0\n )\n\n return D_func\n\n self.Et_funcs = []\n self.Ep_funcs = []\n self.rEt_funcs = []\n self.rEp_funcs = []\n\n for ii in range(Nfreq):\n\n Et_func, Ep_func = construct_component_functions(self.E_spls[ii])\n self.Et_funcs.append(Et_func)\n self.Ep_funcs.append(Ep_func)\n\n rEt_func, rEp_func = construct_component_functions(self.rE_spls[ii])\n self.rEt_funcs.append(rEt_func)\n self.rEp_funcs.append(rEp_func)\n\n self.D_funcs = [\n construct_directivity_function(t, p)\n for (t, p) in zip(self.Et_funcs, self.Ep_funcs)\n ]\n self.rD_funcs = [\n construct_directivity_function(t, p)\n for (t, p) in zip(self.rEt_funcs, self.rEp_funcs)\n ]\n\n def construct_jones_matrix_functions(self, imap=\"default\"):\n if imap == \"default\":\n imap = {\"Et\": (0, 0), \"Ep\": (0, 1), \"rEt\": (1, 0), \"rEp\": (1, 1)}\n\n def construct_jones_matrix_func(Et, Ep, rEt, rEp, imap):\n def J_func(theta, phi):\n theta = np.array(theta)\n phi = np.array(phi)\n\n J_out = np.zeros((theta.size, 2, 2), dtype=np.complex128)\n\n J_out[:, imap[\"Et\"][0], imap[\"Et\"][1]] = Et(theta, phi)\n J_out[:, imap[\"Ep\"][0], imap[\"Ep\"][1]] = Ep(theta, phi)\n J_out[:, imap[\"rEt\"][0], imap[\"rEt\"][1]] = rEt(theta, phi)\n J_out[:, imap[\"rEp\"][0], imap[\"rEp\"][1]] = rEp(theta, phi)\n\n return J_out\n\n return J_func\n\n Nfreq = self.nu_axis.size\n self.J_funcs = []\n for ii in range(Nfreq):\n Et_i = self.Et_funcs[ii]\n Ep_i = self.Ep_funcs[ii]\n rEt_i = self.rEt_funcs[ii]\n rEp_i = self.rEp_funcs[ii]\n\n J_func = construct_jones_matrix_func(Et_i, Ep_i, rEt_i, rEp_i, imap)\n self.J_funcs.append(J_func)\n\n\[email protected]\ndef dl_m(el, s, beta, delta):\n L = int((delta.shape[2] + 1) / 2)\n mp = np.arange(-el, el + 1)\n\n # k = np.exp(1j*mp*beta)\n arg = mp * beta\n k = np.cos(arg) + 1j * np.sin(arg)\n\n ms = -el + L - 1\n mf = (el + 1) + (L - 1)\n s_i = -s + L - 1\n\n delta_1 = delta[el, ms:mf, ms:mf]\n delta_2 = delta[el, ms:mf, s_i]\n\n dl_m_out = np.zeros(2 * el + 1, dtype=nb.complex128)\n\n for i_m in range(len(mp)):\n dl_m_out[i_m] = 1j ** (-s - mp[i_m]) * np.sum(\n k * delta_1[:, i_m] * delta_2, axis=0\n )\n\n return dl_m_out\n\n\[email protected](parallel=True)\ndef 
ssht_numba_series_eval(f_lm, s, L, delta, theta, phi):\n f = np.zeros(len(theta), dtype=nb.complex128)\n\n spin_sign = (-1.0) ** s\n for i in nb.prange(len(theta)):\n for el in range(L):\n m_axis = np.arange(-el, el + 1)\n\n phases = m_axis * phi[i]\n sY_elm = (\n spin_sign\n * np.sqrt((2.0 * el + 1.0) / 4.0 / np.pi)\n * (np.cos(phases) + 1j * np.sin(phases))\n )\n sY_elm *= dl_m(el, s, theta[i], delta)\n\n j0 = el * (el + 1) - el\n j1 = el * (el + 1) + el\n\n f[i] += np.sum(sY_elm * f_lm[j0 : j1 + 1])\n\n return f\n"
] | [
[
"scipy.interpolate.RectBivariateSpline",
"numpy.sqrt",
"numpy.abs",
"numpy.arange",
"numpy.flipud",
"numpy.cos",
"numpy.sin",
"scipy.interpolate.Rbf",
"numpy.append",
"scipy.interpolate.interp1d",
"numpy.diff",
"numpy.where",
"numpy.array",
"numpy.flip",
"numpy.zeros",
"numpy.sum"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
PHASTA/vtkpytools | [
"8c387dad3bb33cc6332eaf69346b1c5f4e59e2be"
] | [
"vtkpytools/common.py"
] | [
"import vtk\nimport pyvista as pv\nimport numpy as np\nfrom scipy.io import FortranFile\nfrom pathlib import Path\nimport re\n\ndef unstructuredToPoly(unstructured_grid):\n \"\"\"Convert vtk.UnstructruedGrid to vtk.PolyData\"\"\"\n geom = vtk.vtkGeometryFilter()\n geom.SetInputData(unstructured_grid)\n geom.Update()\n return pv.wrap(geom.GetOutput())\n\ndef orderPolyDataLine(polydata):\n \"\"\"Put line PolyData points in order\"\"\"\n strip = vtk.vtkStripper()\n strip.SetInputData(polydata)\n strip.Update()\n return pv.wrap(strip.GetOutput())\n\ndef vCutter(input_data, cut_function):\n \"\"\"Returns the intersection of input_data and cut_function\n\n Wrapper around vtkCutter filter. Output contains interpolated data from\n input_data to the intersection location. Note that cell data is NOT passed\n through.\n\n Parameters\n ----------\n input_data : pyvista.PointSet\n Data that will be cut by the cut_function. Intersected point will have\n data interpolated.\n cut_function : vtk.vtkImplicitFunction\n VTK function that cuts the input_data. Most common example would be\n vtkPlane.\n \"\"\"\n\n cutter = vtk.vtkCutter()\n cutter.SetCutFunction(cut_function)\n cutter.SetInputData(input_data)\n cutter.Update()\n return pv.wrap(cutter.GetOutput())\n\nclass Profile(pv.PolyData):\n \"\"\"Wrap of pyvista.PolyData that includes walldata attribute\n\n Use case is for storage of wall local data (boundary layer metrics, Cf\n etc.) with profiles that correspond to that wall local data.\n\n \"\"\"\n walldata = {}\n\n def setWallDataFromPolyDataPoint(self, PolyPoint):\n \"\"\"Set walldata attribute from PolyData Point\n\n Primary use case is the using the output of vtkpytools.vCutter()\n \"\"\"\n if PolyPoint.n_points != 1:\n raise RuntimeError('Profile should only have 1 wallpoint, {:d} given'.format(\n PolyPoint.n_points))\n self.walldata = dict(PolyPoint.point_arrays)\n self.walldata['Point'] = PolyPoint.points\n\ndef readBinaryArray(path, ncols) -> np.ndarray:\n \"\"\"Get array from Fortran binary file.\n\n Parameters\n ----------\n\n path : Path\n Path to Fortran binary array.\n ncols : uint\n Number of columns in the binary file.\n \"\"\"\n array = FortranFile(path, 'r')\n array = array.read_reals()\n nrows = int(array.shape[0]/ncols)\n array = np.reshape(array, (nrows, ncols))\n\n return array\n\ndef globFile(globstring, path: Path, regex=False) -> Path:\n \"\"\" Glob for one file in directory, then return.\n\n If it finds more than one file matching the globstring, it will error out.\n\n Parameters\n ----------\n globstring : str\n String used to glob for file\n path : Path\n Path where file should be searched for\n regex : bool\n Whether globstring should be interpreted by Python regex (default is\n False)\n \"\"\"\n if not regex:\n globlist = list(path.glob(globstring))\n if len(globlist) == 1:\n assert globlist[0].is_file()\n return globlist[0]\n elif len(globlist) > 1:\n raise RuntimeError('Found multiple files matching'\n '\"{}\" in {}:\\n\\t{}'.format(globstring, path,\n '\\n\\t'.join([x.as_posix() for x in globlist])))\n else:\n raise RuntimeError('Could not find file matching'\n '\"{}\" in {}'.format(globstring, path))\n elif regex:\n filestrings = [x.name for x in path.iterdir()]\n globlist = list(filter(re.compile(globstring).match, filestrings))\n if len(globlist) == 1:\n filePath = path / Path(globlist[0])\n assert filePath.is_file()\n return filePath\n elif len(globlist) > 1:\n raise RuntimeError('Found multiple files matching'\n '\"{}\" in {}:\\n\\t{}'.format(globstring, path,\n 
'\\n\\t'.join(globlist)))\n else:\n raise RuntimeError('Could not find file matching'\n '\"{}\" in {}'.format(globstring, path))\n\n"
] | [
[
"numpy.reshape",
"scipy.io.FortranFile"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
lisiqi19971013/Attention-based-Multi-modal-Fusion-Network-for-Semantic-Scene-Completion | [
"046fd7b79af98c482cb56845df13f954c1f4b69b"
] | [
"SemanticSceneCompletion/NYU_RGBD/NYU_RGBD_DataLoader.py"
] | [
"import torch\nimport numpy as np\nimport torch.utils.data as torch_data\nfrom PIL import Image\nimport os\nfrom SemanticSceneCompletion.NYU_RGBD.NYU_Path import *\nfrom torchvision.transforms import Compose, Normalize, ToTensor\nimport random\n\n\ndef PCA_Jittering(img):\n img = np.asanyarray(img, dtype = 'float32')\n img = img / 255.0\n img_size = int(img.size / 3)\n img1 = img.reshape(img_size, 3)\n img1 = np.transpose(img1)\n img_cov = np.cov([img1[0], img1[1], img1[2]])\n lamda, p = np.linalg.eig(img_cov)\n\n p = np.transpose(p)\n alpha1 = random.normalvariate(0,1)\n alpha2 = random.normalvariate(0,1)\n alpha3 = random.normalvariate(0,1)\n v = np.transpose((alpha1*lamda[0], alpha2*lamda[1], alpha3*lamda[2]))\n add_num = np.dot(p,v)\n\n img2 = np.array([img[:,:,0]+add_num[0], img[:,:,1]+add_num[1], img[:,:,2]+add_num[2]])\n img2 = img2.reshape(3, img_size)\n img2 = np.transpose(img2)\n img2 = img2.reshape(img.shape)\n img2 = img2 * 255.0\n img2[img2<0] = 0\n img2[img2>255] = 255\n img2 = img2.astype(np.uint8)\n\n return Image.fromarray(img2)\n\n\nclass TrainDataLoaderFused(torch_data.Dataset):\n def __init__(self, path, npz_path, train_or_test, label_transform=None, num_classes=12):\n\n super(TrainDataLoaderFused, self).__init__()\n\n fid = open(path, \"r\")\n self.colorlist = []\n for line in fid.readlines():\n line = line.rstrip(\"\\n\")\n if os.path.exists(line):\n self.colorlist.append(line)\n fid.close()\n\n self.npz_path = npz_path\n\n self.num_classes = num_classes\n self.color_transform = Compose([ToTensor(), Normalize([.485, .456, .406], [.229, .224, .225])])\n self.depth_transform = Compose([ToTensor(), Normalize([.5282, .3914, .4266], [.1945, .2480, .1506])])\n self.label_transform = label_transform\n\n self.resize_size = (384, 288)\n\n if train_or_test == 'train':\n self.filelist = np.arange(795)\n else:\n self.filelist = np.arange(654)\n\n self.train_or_test = train_or_test\n\n def __len__(self):\n return len(self.colorlist)\n\n def __getitem__(self, index):\n color = Image.open(self.colorlist[index]).convert('RGB')\n color = color.resize(self.resize_size, Image.ANTIALIAS)\n if self.train_or_test == \"train\":\n color = PCA_Jittering(color)\n color = self.color_transform(color)\n\n if self.train_or_test == 'train':\n depth = Image.open('%s/%06d.png' % (NYU_HHA_PATH_TRAIN, self.filelist[index]+1)).convert('RGB')\n else:\n depth = Image.open('%s/%06d.png' % (NYU_HHA_PATH_TEST, self.filelist[index]+1)).convert('RGB')\n depth = depth.resize(self.resize_size, Image.ANTIALIAS)\n depth = self.depth_transform(depth)\n\n loaddata = np.load(os.path.join(self.npz_path, '%06d.npz' % self.filelist[index]))\n label = torch.LongTensor(loaddata['arr_1'].astype(np.int64))\n label_weight = torch.FloatTensor(loaddata['arr_2'].astype(np.float32))\n mapping = loaddata['arr_3'].astype(np.int64)\n mapping1 = np.ones(8294400, dtype=np.int64)\n mapping1[:] = -1\n ind, = np.where(mapping >= 0)\n mapping1[mapping[ind]] = ind\n mapping2 = torch.autograd.Variable(torch.FloatTensor(mapping1.reshape((1, 1, 240, 144, 240)).astype(np.float32)))\n mapping2 = torch.nn.MaxPool3d(4, 4)(mapping2).data.view(-1).numpy()\n mapping2[mapping2 < 0] = 307200\n depth_mapping_3d = torch.LongTensor(mapping2.astype(np.int64))\n\n return color, depth, label, label_weight, depth_mapping_3d\n"
] | [
[
"numpy.dot",
"numpy.linalg.eig",
"numpy.arange",
"numpy.ones",
"torch.nn.MaxPool3d",
"numpy.cov",
"numpy.asanyarray",
"numpy.transpose",
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
shanedrinion/GamestonkTerminal | [
"baf36aa7c96de6918911c7a263cf5ac9648b27e3"
] | [
"gamestonk_terminal/technical_analysis/momentum.py"
] | [
"import argparse\nimport matplotlib.pyplot as plt\nimport pandas_ta as ta\nfrom pandas.plotting import register_matplotlib_converters\nfrom gamestonk_terminal.helper_funcs import (\n check_positive,\n parse_known_args_and_warn,\n plot_autoscale,\n)\nfrom gamestonk_terminal.config_plot import PLOT_DPI\nfrom gamestonk_terminal import feature_flags as gtff\n\nregister_matplotlib_converters()\n\n\ndef cci(l_args, s_ticker, s_interval, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"cci\",\n description=\"\"\"\n The CCI is designed to detect beginning and ending market trends.\n The range of 100 to -100 is the normal trading range. CCI values outside of this\n range indicate overbought or oversold conditions. You can also look for price\n divergence in the CCI. If the price is making new highs, and the CCI is not,\n then a price correction is likely.\n \"\"\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--length\",\n action=\"store\",\n dest=\"n_length\",\n type=check_positive,\n default=14,\n help=\"length\",\n )\n parser.add_argument(\n \"-s\",\n \"--scalar\",\n action=\"store\",\n dest=\"n_scalar\",\n type=check_positive,\n default=0.015,\n help=\"scalar\",\n )\n parser.add_argument(\n \"-o\",\n \"--offset\",\n action=\"store\",\n dest=\"n_offset\",\n type=check_positive,\n default=0,\n help=\"offset\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Daily\n if s_interval == \"1440min\":\n df_ta = ta.cci(\n high=df_stock[\"High\"],\n low=df_stock[\"Low\"],\n close=df_stock[\"Adj Close\"],\n length=ns_parser.n_length,\n scalar=ns_parser.n_scalar,\n offset=ns_parser.n_offset,\n ).dropna()\n\n # Intraday\n else:\n df_ta = ta.cci(\n high=df_stock[\"High\"],\n low=df_stock[\"Low\"],\n close=df_stock[\"Close\"],\n length=ns_parser.n_length,\n scalar=ns_parser.n_scalar,\n offset=ns_parser.n_offset,\n ).dropna()\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n plt.subplot(211)\n plt.title(f\"Commodity Channel Index (CCI) on {s_ticker}\")\n if s_interval == \"1440min\":\n plt.plot(df_stock.index, df_stock[\"Adj Close\"].values, \"k\", lw=2)\n else:\n plt.plot(df_stock.index, df_stock[\"Close\"].values, \"k\", lw=2)\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.subplot(212)\n plt.plot(df_ta.index, df_ta.values, \"b\", lw=2)\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.axhspan(100, plt.gca().get_ylim()[1], facecolor=\"r\", alpha=0.2)\n plt.axhspan(plt.gca().get_ylim()[0], -100, facecolor=\"g\", alpha=0.2)\n plt.axhline(100, linewidth=3, color=\"r\", ls=\"--\")\n plt.axhline(-100, linewidth=3, color=\"g\", ls=\"--\")\n plt.xlabel(\"Time\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.gca().twinx()\n plt.ylim(plt.gca().get_ylim())\n plt.yticks([0.2, 0.8], (\"OVERSOLD\", \"OVERBOUGHT\"))\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n\n\ndef macd(l_args, s_ticker, s_interval, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"macd\",\n description=\"\"\"\n The Moving Average Convergence Divergence (MACD) is the difference\n between two Exponential Moving Averages. 
The Signal line is an Exponential Moving\n Average of the MACD. \\n \\n The MACD signals trend changes and indicates the start\n of new trend direction. High values indicate overbought conditions, low values\n indicate oversold conditions. Divergence with the price indicates an end to the\n current trend, especially if the MACD is at extreme high or low values. When the MACD\n line crosses above the signal line a buy signal is generated. When the MACD crosses\n below the signal line a sell signal is generated. To confirm the signal, the MACD\n should be above zero for a buy, and below zero for a sell.\n \"\"\",\n )\n\n parser.add_argument(\n \"-f\",\n \"--fast\",\n action=\"store\",\n dest=\"n_fast\",\n type=check_positive,\n default=12,\n help=\"The short period.\",\n )\n parser.add_argument(\n \"-s\",\n \"--slow\",\n action=\"store\",\n dest=\"n_slow\",\n type=check_positive,\n default=26,\n help=\"The long period.\",\n )\n parser.add_argument(\n \"--signal\",\n action=\"store\",\n dest=\"n_signal\",\n type=check_positive,\n default=9,\n help=\"The signal period.\",\n )\n parser.add_argument(\n \"-o\",\n \"--offset\",\n action=\"store\",\n dest=\"n_offset\",\n type=check_positive,\n default=0,\n help=\"How many periods to offset the result.\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Daily\n if s_interval == \"1440min\":\n df_ta = ta.macd(\n df_stock[\"Adj Close\"],\n fast=ns_parser.n_fast,\n slow=ns_parser.n_slow,\n signal=ns_parser.n_signal,\n offset=ns_parser.n_offset,\n ).dropna()\n\n # Intraday\n else:\n df_ta = ta.macd(\n df_stock[\"Close\"],\n fast=ns_parser.n_fast,\n slow=ns_parser.n_slow,\n signal=ns_parser.n_signal,\n offset=ns_parser.n_offset,\n ).dropna()\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n plt.subplot(211)\n plt.title(f\"Moving Average Convergence Divergence (MACD) on {s_ticker}\")\n if s_interval == \"1440min\":\n plt.plot(df_stock.index, df_stock[\"Adj Close\"].values, \"k\", lw=2)\n else:\n plt.plot(df_stock.index, df_stock[\"Close\"].values, \"k\", lw=2)\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.subplot(212)\n plt.plot(df_ta.index, df_ta.iloc[:, 0].values, \"b\", lw=2)\n plt.plot(df_ta.index, df_ta.iloc[:, 2].values, \"r\", lw=2)\n plt.bar(df_ta.index, df_ta.iloc[:, 1].values, color=\"g\")\n plt.legend(\n [\n f\"MACD Line {df_ta.columns[0]}\",\n f\"Signal Line {df_ta.columns[2]}\",\n f\"Histogram {df_ta.columns[1]}\",\n ]\n )\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.xlabel(\"Time\")\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n\n\ndef rsi(l_args, s_ticker, s_interval, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"rsi\",\n description=\"\"\"\n The Relative Strength Index (RSI) calculates a ratio of the\n recent upward price movements to the absolute price movement. The RSI ranges\n from 0 to 100. The RSI is interpreted as an overbought/oversold indicator when\n the value is over 70/below 30. You can also look for divergence with price. 
If\n the price is making new highs/lows, and the RSI is not, it indicates a reversal.\n \"\"\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--length\",\n action=\"store\",\n dest=\"n_length\",\n type=check_positive,\n default=14,\n help=\"length\",\n )\n parser.add_argument(\n \"-s\",\n \"--scalar\",\n action=\"store\",\n dest=\"n_scalar\",\n type=check_positive,\n default=100,\n help=\"scalar\",\n )\n parser.add_argument(\n \"-d\",\n \"--drift\",\n action=\"store\",\n dest=\"n_drift\",\n type=check_positive,\n default=1,\n help=\"drift\",\n )\n parser.add_argument(\n \"-o\",\n \"--offset\",\n action=\"store\",\n dest=\"n_offset\",\n type=check_positive,\n default=0,\n help=\"offset\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Daily\n if s_interval == \"1440min\":\n df_ta = ta.rsi(\n df_stock[\"Adj Close\"],\n length=ns_parser.n_length,\n scalar=ns_parser.n_scalar,\n drift=ns_parser.n_drift,\n offset=ns_parser.n_offset,\n ).dropna()\n\n # Intraday\n else:\n df_ta = ta.rsi(\n df_stock[\"Close\"],\n length=ns_parser.n_length,\n scalar=ns_parser.n_scalar,\n drift=ns_parser.n_drift,\n offset=ns_parser.n_offset,\n ).dropna()\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n plt.subplot(211)\n if s_interval == \"1440min\":\n plt.plot(df_stock.index, df_stock[\"Adj Close\"].values, \"k\", lw=2)\n else:\n plt.plot(df_stock.index, df_stock[\"Close\"].values, \"k\", lw=2)\n plt.title(f\"Relative Strength Index (RSI) on {s_ticker}\")\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.subplot(212)\n plt.plot(df_ta.index, df_ta.values, \"b\", lw=2)\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.axhspan(70, 100, facecolor=\"r\", alpha=0.2)\n plt.axhspan(0, 30, facecolor=\"g\", alpha=0.2)\n plt.axhline(70, linewidth=3, color=\"r\", ls=\"--\")\n plt.axhline(30, linewidth=3, color=\"g\", ls=\"--\")\n plt.xlabel(\"Time\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.ylim([0, 100])\n plt.gca().twinx()\n plt.ylim(plt.gca().get_ylim())\n plt.yticks([0.15, 0.85], (\"OVERSOLD\", \"OVERBOUGHT\"))\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n\n\ndef stoch(l_args, s_ticker, s_interval, df_stock):\n parser = argparse.ArgumentParser(\n add_help=False,\n prog=\"stoch\",\n description=\"\"\"\n The Stochastic Oscillator measures where the close is in relation\n to the recent trading range. The values range from zero to 100. %D values over 75\n indicate an overbought condition; values under 25 indicate an oversold condition.\n When the Fast %D crosses above the Slow %D, it is a buy signal; when it crosses\n below, it is a sell signal. 
The Raw %K is generally considered too erratic to use\n for crossover signals.\n \"\"\",\n )\n\n parser.add_argument(\n \"-k\",\n \"--fastkperiod\",\n action=\"store\",\n dest=\"n_fastkperiod\",\n type=check_positive,\n default=14,\n help=\"The time period of the fastk moving average\",\n )\n parser.add_argument(\n \"-d\",\n \"--slowdperiod\",\n action=\"store\",\n dest=\"n_slowdperiod\",\n type=check_positive,\n default=3,\n help=\"The time period of the slowd moving average\",\n )\n parser.add_argument(\n \"--slowkperiod\",\n action=\"store\",\n dest=\"n_slowkperiod\",\n type=check_positive,\n default=3,\n help=\"The time period of the slowk moving average\",\n )\n parser.add_argument(\n \"-o\",\n \"--offset\",\n action=\"store\",\n dest=\"n_offset\",\n type=check_positive,\n default=0,\n help=\"offset\",\n )\n\n try:\n ns_parser = parse_known_args_and_warn(parser, l_args)\n if not ns_parser:\n return\n\n # Daily\n if s_interval == \"1440min\":\n df_ta = ta.stoch(\n high=df_stock[\"High\"],\n low=df_stock[\"Low\"],\n close=df_stock[\"Adj Close\"],\n k=ns_parser.n_fastkperiod,\n d=ns_parser.n_slowdperiod,\n smooth_k=ns_parser.n_slowkperiod,\n offset=ns_parser.n_offset,\n ).dropna()\n\n # Intraday\n else:\n df_ta = ta.stoch(\n high=df_stock[\"High\"],\n low=df_stock[\"Low\"],\n close=df_stock[\"Close\"],\n k=ns_parser.n_fastkperiod,\n d=ns_parser.n_slowdperiod,\n smooth_k=ns_parser.n_slowkperiod,\n offset=ns_parser.n_offset,\n ).dropna()\n\n plt.figure(figsize=plot_autoscale(), dpi=PLOT_DPI)\n plt.subplot(211)\n if s_interval == \"1440min\":\n plt.plot(df_stock.index, df_stock[\"Adj Close\"].values, \"k\", lw=2)\n else:\n plt.plot(df_stock.index, df_stock[\"Close\"].values, \"k\", lw=2)\n plt.title(f\"Stochastic Relative Strength Index (STOCH RSI) on {s_ticker}\")\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.ylabel(\"Share Price ($)\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.subplot(212)\n plt.plot(df_ta.index, df_ta.iloc[:, 0].values, \"k\", lw=2)\n plt.plot(df_ta.index, df_ta.iloc[:, 1].values, \"b\", lw=2, ls=\"--\")\n plt.xlim(df_stock.index[0], df_stock.index[-1])\n plt.axhspan(80, 100, facecolor=\"r\", alpha=0.2)\n plt.axhspan(0, 20, facecolor=\"g\", alpha=0.2)\n plt.axhline(80, linewidth=3, color=\"r\", ls=\"--\")\n plt.axhline(20, linewidth=3, color=\"g\", ls=\"--\")\n plt.legend([f\"%K {df_ta.columns[0]}\", f\"%D {df_ta.columns[1]}\"])\n plt.xlabel(\"Time\")\n plt.grid(b=True, which=\"major\", color=\"#666666\", linestyle=\"-\")\n plt.minorticks_on()\n plt.grid(b=True, which=\"minor\", color=\"#999999\", linestyle=\"-\", alpha=0.2)\n plt.ylim([0, 100])\n plt.gca().twinx()\n plt.ylim(plt.gca().get_ylim())\n plt.yticks([0.1, 0.9], (\"OVERSOLD\", \"OVERBOUGHT\"))\n\n if gtff.USE_ION:\n plt.ion()\n\n plt.show()\n\n print(\"\")\n\n except Exception as e:\n print(e)\n print(\"\")\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.axhline",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.axhspan",
"matplotlib.pyplot.minorticks_on",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.subplot",
"pandas.plotting.register_matplotlib_converters",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.bar",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.yticks",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
frosty110/Book_Projects | [
"49566d615fbfe686fc8a489345a61a3a480912b6"
] | [
"Data Science/Applied Data science with Python/Resources-python/DecisionTreesIris.py"
] | [
"# -*- coding: utf-8 -*-\n\"\"\"\n-----------------------------------------------------------------------------\n\n Decision Trees : Classifying Iris\n\n Copyright : V2 Maestros @2015\n \nProblem Statement\n*****************\nThe input data is the iris dataset. It contains recordings of \ninformation about flower samples. For each sample, the petal and \nsepal length and width are recorded along with the type of the \nflower. We need to use this dataset to build a decision tree \nmodel that can predict the type of flower based on the petal \nand sepal information.\n\n## Techniques Used\n\n1. Decision Trees \n2. Training and Testing\n3. Confusion Matrix\n\n\n-----------------------------------------------------------------------------\n\"\"\"\n\nfrom pandas import Series, DataFrame\nimport pandas as pd\nimport numpy as np\nimport os\nimport matplotlib.pylab as plt\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.metrics import classification_report\nimport sklearn.metrics\n\nos.chdir(\"C:\\Personal\\V2Maestros\\Modules\\Machine Learning Algorithms\\Decision Trees\")\n\n\"\"\"\nData Engineering and Analysis\n\"\"\"\n#Load the dataset\n\niris_data = pd.read_csv(\"iris.csv\")\n\niris_data.dtypes\niris_data.describe()\niris_data.head()\n\n\"\"\"\n1. The ranges of values in each of the variables (columns) look ok without any kind of outliers\n\n2. There is equal distribution of the three classes - setosa, versicolor and virginia\n\nNo cleansing required\n\n\"\"\"\n#Exploratory Data Analysis\n\nplt.scatter(iris_data['Petal.Length'],iris_data['Petal.Width'])\nplt.cla()\nplt.scatter(iris_data['Sepal.Length'], iris_data['Sepal.Width'])\nplt.cla()\n\nplt.boxplot([[iris_data['Petal.Length'][iris_data.Species=='setosa']],\n [iris_data['Petal.Length'][iris_data.Species=='versicolor']] ,\n [iris_data['Petal.Length'][iris_data.Species=='virginica']] ],\n labels=('setosa','versicolor','virginica'))\nplt.cla() \n\nplt.boxplot([[iris_data['Petal.Width'][iris_data.Species=='setosa']],\n [iris_data['Petal.Width'][iris_data.Species=='versicolor']] ,\n [iris_data['Petal.Width'][iris_data.Species=='virginica']] ],\n labels=('setosa','versicolor','virginica'))\n \nplt.cla()\nplt.boxplot([[iris_data['Sepal.Length'][iris_data.Species=='setosa']],\n [iris_data['Sepal.Length'][iris_data.Species=='versicolor']] ,\n [iris_data['Sepal.Length'][iris_data.Species=='virginica']] ],\n labels=('setosa','versicolor','virginica'))\n \nplt.cla()\nplt.boxplot([[iris_data['Sepal.Width'][iris_data.Species=='setosa']],\n [iris_data['Sepal.Width'][iris_data.Species=='versicolor']] ,\n [iris_data['Sepal.Width'][iris_data.Species=='virginica']] ],\n labels=('setosa','versicolor','virginica'))\n \n\"\"\"\nAll 3 except Sepal Width seem to bring the significant differenciation\n between the 3 classes\n\"\"\"\n#Correlations\niris_data.corr()\n\n\"\"\"\nModeling and Prediction\n\"\"\"\n#Split into training and testing sets\n\npredictors = iris_data[['Sepal.Length','Sepal.Width','Petal.Length','Petal.Width']]\ntargets = iris_data.Species\n\npred_train, pred_test, tar_train, tar_test = train_test_split(predictors, targets, test_size=.3)\n\npred_train.shape\npred_test.shape\ntar_train.shape\ntar_test.shape\n\n#Build model on training data\nclassifier=DecisionTreeClassifier()\nclassifier=classifier.fit(pred_train,tar_train)\n\npredictions=classifier.predict(pred_test)\n\nsklearn.metrics.confusion_matrix(tar_test,predictions)\nsklearn.metrics.accuracy_score(tar_test, 
predictions)\nsklearn.metrics.classification_report(tar_test, predictions)\n\n#Displaying the decision tree\nfrom sklearn import tree\nfrom StringIO import StringIO\nfrom IPython.display import Image\nout = StringIO()\ntree.export_graphviz(classifier, out_file=out)\nimport pydot\ngraph=pydot.graph_from_dot_data(out.getvalue())\nImage(graph.create_png())\n\n\"\"\"\nThe model shows very high accuracy. The reason why the accuracy\n is so high is because, the data itself has very strong signals \n (seperation between the classes). Sepal.Length and Sepal.Width\n have very high correlations and they are used in the decision\n tree. In order to see how the tree will behave if it only had\n Sepal.Length and Sepal.Width, let us remove that data and see\n how accurate the tree is.\n\"\"\"\n#Split into training and testing sets\n\n#Only pick 2 features\npredictors = iris_data[['Sepal.Length','Sepal.Width']]\ntargets = iris_data.Species\n\npred_train, pred_test, tar_train, tar_test = train_test_split(predictors, targets, test_size=.3)\n\npred_train.shape\npred_test.shape\ntar_train.shape\ntar_test.shape\n\n#Build model on training data\nclassifier=DecisionTreeClassifier()\nclassifier=classifier.fit(pred_train,tar_train)\n\npredictions=classifier.predict(pred_test)\n\nsklearn.metrics.confusion_matrix(tar_test,predictions)\nsklearn.metrics.accuracy_score(tar_test, predictions)\nsklearn.metrics.classification_report(tar_test, predictions)\n\n\n\"\"\"\nThere is a big drop in accuracy score to 60% from 90% when\ntop predictor variables are removed from the dataset.\n\"\"\""
] | [
[
"sklearn.tree.export_graphviz",
"pandas.read_csv",
"sklearn.cross_validation.train_test_split",
"matplotlib.pylab.boxplot",
"matplotlib.pylab.scatter",
"matplotlib.pylab.cla",
"sklearn.tree.DecisionTreeClassifier"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
RohitNagraj/SelfDrivingCar | [
"4992ecac64a18de16a4ccc2e3ea27358d70fc8b5"
] | [
"Drive.py"
] | [
"import argparse\nimport base64\nfrom datetime import datetime\nimport os\nimport shutil\nimport numpy as np\nimport socketio\nimport eventlet\nimport eventlet.wsgi\nfrom PIL import Image\nfrom flask import Flask\nfrom io import BytesIO\nfrom keras.models import load_model\nimport utils\n\nsio = socketio.Server()\n\napp = Flask(__name__)\n\nmodel = None\nprev_image_array = None\n\nMAX_SPEED = 25\nMIN_SPEED = 10\n\nspeed_limit = MAX_SPEED\n\n\[email protected]('telemetry')\ndef telemetry(data):\n if data:\n speed = float(data[\"speed\"])\n image = Image.open(BytesIO(base64.b64decode(data[\"image\"])))\n try:\n image = np.asarray(image)\n image = utils.preprocess(image)\n image = np.array([image])\n\n steering_angle = float(model.predict(image, batch_size=1))\n\n # lower the throttle as the speed increases\n # if the speed is above the current speed limit, we are on a downhill.\n # make sure we slow down first and then go back to the original max speed.\n\n global speed_limit\n if speed > speed_limit:\n speed_limit = MIN_SPEED\n else:\n speed_limit = MAX_SPEED\n throttle = 1.0 - steering_angle**2 - (speed/speed_limit)**2\n\n print('{} {} {}'.format(steering_angle, throttle, speed))\n send_control(steering_angle, throttle)\n except Exception as e:\n print(e)\n\n else:\n\n sio.emit('manual', data={}, skip_sid=True)\n\n\[email protected]('connect')\ndef connect(sid):\n print(\"connect \", sid)\n send_control(0, 0)\n\n\ndef send_control(steering_angle, throttle):\n sio.emit(\n \"steer\",\n data={\n 'steering_angle': steering_angle.__str__(),\n 'throttle': throttle.__str__()\n },\n skip_sid=True)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Remote Driving')\n parser.add_argument(\n 'model',\n type=str,\n help='Path to model h5 file. Model should be on the same path.'\n )\n\n args = parser.parse_args()\n\n model = load_model(args.model)\n\n app = socketio.Middleware(sio, app)\n\n eventlet.wsgi.server(eventlet.listen(('', 4567)), app)\n"
] | [
[
"numpy.asarray",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
JakeConnors376W/ludwig | [
"d16488aed4821cf89642c967e06f8961f2ab53bd"
] | [
"ludwig/data/dataset.py"
] | [
"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport h5py\nimport numpy as np\n\n\nclass Dataset:\n def __init__(self, dataset, input_features, output_features, data_hdf5_fp):\n self.dataset = dataset\n\n self.size = min(map(len, self.dataset.values()))\n\n self.input_features = {}\n for feature in input_features:\n feature_name = feature['name']\n self.input_features[feature_name] = feature\n self.output_features = {}\n for feature in output_features:\n feature_name = feature['name']\n self.output_features[feature_name] = feature\n self.features = self.input_features.copy()\n self.features.update(self.output_features)\n self.data_hdf5_fp = data_hdf5_fp\n\n def get(self, feature_name, idx=None):\n if idx is None:\n idx = range(self.size)\n if (self.data_hdf5_fp is None or\n 'in_memory' not in self.features[feature_name]):\n return self.dataset[feature_name][idx]\n if self.features[feature_name]['in_memory']:\n return self.dataset[feature_name][idx]\n\n sub_batch = self.dataset[feature_name][idx]\n\n indices = np.empty((3, len(sub_batch)), dtype=np.int64)\n indices[0, :] = sub_batch\n indices[1, :] = np.arange(len(sub_batch))\n indices = indices[:, np.argsort(indices[0])]\n\n with h5py.File(self.data_hdf5_fp, 'r') as h5_file:\n im_data = h5_file[feature_name + '_data'][indices[0, :], :, :]\n indices[2, :] = np.arange(len(sub_batch))\n indices = indices[:, np.argsort(indices[1])]\n return im_data[indices[2, :]]\n\n def get_dataset(self):\n return self.dataset\n\n def set_dataset(self, dataset):\n self.dataset = dataset\n"
] | [
[
"numpy.argsort"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mahmoodm2/interpret | [
"b28c858667fd38460a0c850aaa23a2692987f070"
] | [
"python/interpret-core/interpret/glassbox/ebm/test/test_ebm.py"
] | [
"# Copyright (c) 2019 Microsoft Corporation\n# Distributed under the MIT software license\n\n# TODO PK add a test for Regression with interactions\n# TODO PK add a test with a real regression dataset\n# TODO PK add a test with more than 1 multiclass interaction\n\nfrom ....test.utils import (\n synthetic_multiclass,\n synthetic_classification,\n adult_classification,\n iris_classification,\n)\nfrom ....test.utils import synthetic_regression\nfrom ..ebm import ExplainableBoostingRegressor, ExplainableBoostingClassifier\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.model_selection import (\n cross_validate,\n StratifiedShuffleSplit,\n train_test_split,\n)\nfrom sklearn.metrics import accuracy_score\nimport pytest\n\nimport warnings\n\n\ndef warn(*args, **kwargs):\n pass\n\n\nwarnings.warn = warn\n\[email protected]\ndef test_unknown_multiclass_category():\n data = iris_classification()\n X_train = data[\"train\"][\"X\"]\n y_train = data[\"train\"][\"y\"]\n\n X_test = data[\"test\"][\"X\"]\n y_test = data[\"test\"][\"y\"]\n\n # Add categorical feature\n X_train['cat_feature'] = [np.random.choice(['a', 'b', 'c']) for x in range(X_train.shape[0])]\n X_test['cat_feature'] = ['d' for x in range(X_test.shape[0])] # Unknown category in test set\n\n # X_train['cat_feature'][1] = np.nan\n # X_test['cat_feature'][1] = np.nan\n\n clf = ExplainableBoostingClassifier()\n clf.fit(X_train, y_train)\n\n # Term contributions for categorical feature should always be 0 in test\n assert np.all(clf.explain_local(X_train).data(0)['scores'][-1] != 0)\n assert np.all(clf.explain_local(X_test).data(0)['scores'][-1] == 0)\n\[email protected]\ndef test_unknown_binary_category():\n data = adult_classification()\n X_tr = data[\"train\"][\"X\"]\n y_tr = data[\"train\"][\"y\"]\n X_te = data[\"test\"][\"X\"]\n y_te = data[\"test\"][\"y\"]\n\n ebm = ExplainableBoostingClassifier(n_jobs=2, outer_bags=2, interactions=[[0, 13], [13, 3], [1, 2]])\n ebm.fit(X_tr, y_tr)\n\n test_point = X_te[[0]].copy()\n perturbed_point = test_point.copy()\n perturbed_point[0, -1] = 'Unseen Categorical' # Change country to unseen value\n\n # Perturbed feature contribution\n country_contrib = ebm.explain_local(test_point).data(0)['scores'][-4]\n perturbed_contrib = ebm.explain_local(perturbed_point).data(0)['scores'][-4]\n\n assert country_contrib != 0\n assert perturbed_contrib == 0\n\n # Perturbed interaction contribution (dim 1)\n country_inter_contrib = ebm.explain_local(test_point).data(0)['scores'][-3]\n perturbed_inter_contrib = ebm.explain_local(perturbed_point).data(0)['scores'][-3]\n\n assert country_inter_contrib != 0\n assert perturbed_inter_contrib == 0\n\n # Perturbed interaction contribution (dim 2)\n country_inter_contrib_2 = ebm.explain_local(test_point).data(0)['scores'][-2]\n perturbed_inter_contrib_2 = ebm.explain_local(perturbed_point).data(0)['scores'][-2]\n\n assert country_inter_contrib_2 != 0\n assert perturbed_inter_contrib_2 == 0\n\n # Sum(logit) differences from decision_function should only come from perturbed columns\n test_logit = ebm.decision_function(test_point)\n perturbed_logit = ebm.decision_function(perturbed_point)\n\n assert test_logit != perturbed_logit\n assert np.allclose(test_logit, (perturbed_logit + country_contrib + country_inter_contrib + country_inter_contrib_2))\n\n\[email protected]\[email protected]\ndef test_ebm_synthetic_multiclass():\n data = synthetic_multiclass()\n X_train = data[\"train\"][\"X\"]\n y_train = data[\"train\"][\"y\"]\n X_test = data[\"test\"][\"X\"]\n y_test = 
data[\"test\"][\"y\"]\n\n clf = ExplainableBoostingClassifier(n_jobs=-2, interactions=0, outer_bags=2)\n clf.fit(X_train, y_train)\n\n prob_scores = clf.predict_proba(X_train)\n\n within_bounds = (prob_scores >= 0.0).all() and (prob_scores <= 1.0).all()\n assert within_bounds\n\n valid_ebm(clf)\n\n # Smoke test visualization(s)\n ebm_global = clf.explain_global()\n ebm_global.visualize(None)\n fig = ebm_global.visualize(0)\n assert len(fig.data) == 4 # Number of features\n\n ebm_local = clf.explain_local(X_test, y_test)\n ebm_local.visualize(None)\n fig = ebm_local.visualize(0)\n assert len(fig.data) == 3 # Number of classes\n\n\[email protected]\ndef test_ebm_synthetic_multiclass_pairwise():\n data = synthetic_multiclass()\n X = data[\"full\"][\"X\"]\n y = data[\"full\"][\"y\"]\n\n clf = ExplainableBoostingClassifier(n_jobs=-2, interactions=1, outer_bags=2)\n clf.fit(X, y)\n clf.predict_proba(X)\n valid_ebm(clf)\n\n\[email protected]\ndef test_ebm_synthetic_pairwise():\n a = np.random.randint(low=0, high=50, size=1000)\n b = np.random.randint(low=0, high=20, size=1000)\n\n df = pd.DataFrame(np.c_[a, b], columns=[\"a\", \"b\"])\n df[\"y\"] = [\n 1 if (x > 35 and y > 15) or (x < 15 and y < 5) else 0\n for x, y in zip(df[\"a\"], df[\"b\"])\n ]\n\n X = df[[\"a\", \"b\"]]\n y = df[\"y\"]\n\n seed = 1\n X_train, X_test, y_train, y_test = train_test_split(\n X, y, test_size=0.20, random_state=seed\n )\n\n clf = ExplainableBoostingClassifier(n_jobs=1, outer_bags=1, interactions=1)\n clf.fit(X_train, y_train)\n\n clf_global = clf.explain_global()\n\n # Low/Low and High/High should learn high scores\n assert clf_global.data(2)[\"scores\"][-1][-1] > 5\n assert clf_global.data(2)[\"scores\"][1][1] > 5\n\n\[email protected]\ndef test_prefit_ebm():\n data = synthetic_classification()\n X = data[\"full\"][\"X\"]\n y = data[\"full\"][\"y\"]\n\n clf = ExplainableBoostingClassifier(n_jobs=1, interactions=0, max_rounds=0)\n clf.fit(X, y)\n\n for _, model_feature_group in enumerate(clf.additive_terms_):\n has_non_zero = np.any(model_feature_group)\n assert not has_non_zero\n\n\ndef test_ebm_synthetic_regression():\n data = synthetic_regression()\n X = data[\"full\"][\"X\"]\n y = data[\"full\"][\"y\"]\n\n clf = ExplainableBoostingRegressor(n_jobs=-2, interactions=0)\n clf.fit(X, y)\n clf.predict(X)\n\n valid_ebm(clf)\n\n\ndef valid_ebm(ebm):\n assert ebm.feature_groups_[0] == [0]\n\n for _, model_feature_group in enumerate(ebm.additive_terms_):\n all_finite = np.isfinite(model_feature_group).all()\n assert all_finite\n\n\ndef test_ebm_synthetic_classification():\n data = synthetic_classification()\n X = data[\"full\"][\"X\"]\n y = data[\"full\"][\"y\"]\n\n clf = ExplainableBoostingClassifier(n_jobs=-2, interactions=0)\n clf.fit(X, y)\n prob_scores = clf.predict_proba(X)\n\n within_bounds = (prob_scores >= 0.0).all() and (prob_scores <= 1.0).all()\n assert within_bounds\n\n valid_ebm(clf)\n\n\ndef _smoke_test_explanations(global_exp, local_exp, port):\n from .... 
import preserve, show, shutdown_show_server, set_show_addr\n\n set_show_addr((\"127.0.0.1\", port))\n\n # Smoke test: should run without crashing.\n preserve(global_exp)\n preserve(local_exp)\n show(global_exp)\n show(local_exp)\n\n # Check all features for global (including interactions).\n for selector_key in global_exp.selector[global_exp.selector.columns[0]]:\n preserve(global_exp, selector_key)\n\n shutdown_show_server()\n\n\n\[email protected]\[email protected]\ndef test_ebm_uniform():\n from sklearn.metrics import roc_auc_score\n\n data = adult_classification()\n X = data[\"full\"][\"X\"]\n y = data[\"full\"][\"y\"]\n X_tr = data[\"train\"][\"X\"]\n y_tr = data[\"train\"][\"y\"]\n X_te = data[\"test\"][\"X\"]\n y_te = data[\"test\"][\"y\"]\n\n clf = ExplainableBoostingClassifier(binning=\"uniform\", n_jobs=-2, interactions=3)\n n_splits = 3\n ss = StratifiedShuffleSplit(n_splits=n_splits, test_size=0.25, random_state=1337)\n cross_validate(\n clf, X, y, scoring=\"roc_auc\", cv=ss, n_jobs=None, return_estimator=True\n )\n\n clf = ExplainableBoostingClassifier(binning=\"uniform\", n_jobs=-2, interactions=3)\n clf.fit(X_tr, y_tr)\n\n prob_scores = clf.predict_proba(X_te)\n\n within_bounds = (prob_scores >= 0.0).all() and (prob_scores <= 1.0).all()\n assert within_bounds\n\n # Performance\n auc = roc_auc_score(y_te, prob_scores[:, 1])\n assert auc > 0.5\n\n valid_ebm(clf)\n\n global_exp = clf.explain_global()\n local_exp = clf.explain_local(X_te[:5, :], y_te[:5])\n\n _smoke_test_explanations(global_exp, local_exp, 6000)\n\[email protected]\[email protected]\ndef test_ebm_uniform_multiclass():\n data = iris_classification()\n X_train = data[\"train\"][\"X\"]\n y_train = data[\"train\"][\"y\"]\n\n X_test = data[\"test\"][\"X\"]\n y_test = data[\"test\"][\"y\"]\n\n clf = ExplainableBoostingClassifier(binning=\"uniform\")\n clf.fit(X_train, y_train)\n\n assert accuracy_score(y_test, clf.predict(X_test)) > 0.9\n\n global_exp = clf.explain_global()\n local_exp = clf.explain_local(X_test, y_test)\n\n _smoke_test_explanations(global_exp, local_exp, 6001)\n\n\[email protected]\[email protected]\ndef test_ebm_adult():\n from sklearn.metrics import roc_auc_score\n\n data = adult_classification()\n X = data[\"full\"][\"X\"]\n y = data[\"full\"][\"y\"]\n X_tr = data[\"train\"][\"X\"]\n y_tr = data[\"train\"][\"y\"]\n X_te = data[\"test\"][\"X\"]\n y_te = data[\"test\"][\"y\"]\n\n clf = ExplainableBoostingClassifier(n_jobs=-2, interactions=3)\n n_splits = 3\n ss = StratifiedShuffleSplit(n_splits=n_splits, test_size=0.25, random_state=1337)\n cross_validate(\n clf, X, y, scoring=\"roc_auc\", cv=ss, n_jobs=None, return_estimator=True\n )\n\n clf = ExplainableBoostingClassifier(n_jobs=-2, interactions=3)\n clf.fit(X_tr, y_tr)\n\n prob_scores = clf.predict_proba(X_te)\n\n within_bounds = (prob_scores >= 0.0).all() and (prob_scores <= 1.0).all()\n assert within_bounds\n\n # Performance\n auc = roc_auc_score(y_te, prob_scores[:, 1])\n assert auc > 0.5\n\n valid_ebm(clf)\n\n global_exp = clf.explain_global()\n local_exp = clf.explain_local(X_te[:5, :], y_te[:5])\n\n _smoke_test_explanations(global_exp, local_exp, 6000)\n\n\[email protected]\[email protected]\ndef test_ebm_iris():\n data = iris_classification()\n X_train = data[\"train\"][\"X\"]\n y_train = data[\"train\"][\"y\"]\n\n X_test = data[\"test\"][\"X\"]\n y_test = data[\"test\"][\"y\"]\n\n clf = ExplainableBoostingClassifier()\n clf.fit(X_train, y_train)\n\n assert accuracy_score(y_test, clf.predict(X_test)) > 0.9\n\n global_exp = 
clf.explain_global()\n local_exp = clf.explain_local(X_test, y_test)\n\n _smoke_test_explanations(global_exp, local_exp, 6001)\n\n\[email protected]\[email protected]\ndef test_ebm_sparse():\n \"\"\" Validate running EBM on scipy sparse data\n \"\"\"\n from sklearn.datasets import make_multilabel_classification\n\n np.random.seed(0)\n n_features = 5\n X, y = make_multilabel_classification(\n n_samples=20, sparse=True, n_features=n_features, n_classes=1, n_labels=2\n )\n\n # train linear model\n clf = ExplainableBoostingClassifier()\n clf.fit(X, y)\n\n assert accuracy_score(y, clf.predict(X)) >= 0.8\n global_exp = clf.explain_global()\n local_exp = clf.explain_local(X, y)\n _smoke_test_explanations(global_exp, local_exp, 6002)\n\n\[email protected]\ndef test_zero_validation():\n data = synthetic_classification()\n X = data[\"full\"][\"X\"]\n y = data[\"full\"][\"y\"]\n\n clf = ExplainableBoostingClassifier(n_jobs=1, interactions=2, validation_size=0)\n clf.fit(X, y)\n"
] | [
[
"sklearn.metrics.roc_auc_score",
"numpy.allclose",
"numpy.random.seed",
"numpy.random.choice",
"numpy.isfinite",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.datasets.make_multilabel_classification",
"numpy.any",
"sklearn.model_selection.cross_validate",
"sklearn.model_selection.StratifiedShuffleSplit",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
coleplante16/Bitcoin-Analyzer | [
"e3f1a564fe1e0880dccbef53f05df9e6f49b8c96"
] | [
"Wrangled.py"
] | [
"from termcolor import colored\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nimport pandas\nimport pyyed\nimport numpy\n\ndef Wrangled(transactions3, transactions, address):\n for i in transactions3:\n transactions += i\n\n# Remove HTML notation\n transactions4 = []\n for i in transactions:\n i = re.sub('<.+?>', ' ', i)\n i = re.sub(',', '', i)\n\n\n i = ' '.join(i.split())\n transactions4.append(i)\n\n# Split into list and remove extra characters\n var1 = []\n for i in transactions4:\n var1 += list(i.split(' '))\n try:\n var1.remove('[')\n var1.remove(']')\n except:\n pass\n\n# Only grab transactional information\n var2 = []\n for item in var1:\n if len(item) > 2:\n var2.append(item)\n\n\n#\n indices = [i for i, x in enumerate(var2) if x == \"Hash\"]\n indices2 = []\n indices2 = indices[1:]\n indices2.append(len(var2))\n\n\n var3 = []\n counter1 = 0\n for x in range(len(indices)):\n var3 += [var2[indices[counter1]: indices2[counter1]]]\n counter1 += 1\n\n\n# Remove extra information\n var4 = []\n for i in var3:\n last = (len(i) - 1)\n first = last - 17\n del i[first:last]\n del i[4]\n if 'Load' in i:\n last1 = (len(i) - 3)\n first1 = last1 - 5\n del i[first1:last1]\n var4.append(i)\n\n for subList in var4:\n position = 0\n length = len(subList)\n for word in subList:\n position = position + 1\n if word == 'BTC' and position < length:\n subList.insert(position, 'To')\n\n# Made Data Frame for Display\n column_names2 = []\n rows = []\n for i in var4:\n column_names2 += i[::2]\n rows += i[1::2]\n #rows.append()\n transactionsChart = pandas.DataFrame(rows, column_names2)\n print(transactionsChart)\n\n\n#Export as CSV\n from export import exportCSV\n exportCSV(transactionsChart, address)\n\n\n#Export as Graphical Confluence Network\n from export import export_yEd\n export_yEd(address, var4,)"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nirbarazida/NBprocessing | [
"3b020829e9c2ec0ef67b881a69637ac14e635e75"
] | [
"NBprocessing/general/_NBgeneral_class.py"
] | [
"\"\"\"\nGeneric functions to manipulate features in pandas data frame.\n\nThis library include the functions:\n 1. missing_values(database):\n prints a data frame with all columns that have missing values.\n for every column will print the number of missing values and the present of it out of total index in the column\n\nCreated by: Nir Barazida\nGood luck!\n\"\"\"\n\nimport pandas as pd\nfrom NBprocessing.general._input_check_general import _InputCheckGeneral\nfrom sklearn.model_selection import train_test_split\nfrom NBprocessing.src import constance_object\n\nclass NBgeneral(object):\n \"\"\"\n Generic functions to manipulate features in pandas data frame.\n\n This library include the functions:\n 1. missing_values(database):\n prints a data frame with all columns that have missing values.\n for every column will print the number of missing values and the present of it out of total index in the column\n\n 2. split_and_check(database, column_name, test_size=0.3):\n Gets a database and target column, split and return:\n 2 different data sets splitted by the ratio defined in 'test_size' variable\n etch data set will be split to main data and target column\n total 4 variables\n Will also print the shape of every data set.\n\n Created by: Nir Barazida\n Good luck!\n \"\"\"\n\n @staticmethod\n @_InputCheckGeneral._missing_values_checker\n def missing_values(database):\n \"\"\"\n General Information\n ----------\n prints a data frame with all columns that have missing values.\n for every column will print the number of missing values and the present of it out of total index in the column\n\n Parameters\n ----------\n :param database: pandas Data Frame\n data set to fill missing values in.\n\n Returns\n -------\n None.\n prints a data frame with all columns that have missing values.\n\n Raises\n ------\n ValueError : If input value not as mentioned above.\n \"\"\"\n\n columns_missing_values = (database.count() / len(database)) < 1\n missing_values = database.loc[:, columns_missing_values].isnull().sum().sort_values(ascending=False)\n missing_value_df = pd.concat([missing_values, 100 * round(missing_values / len(database), 3)],\n axis=1, keys=[\"#Missing_values\", \"%Missing_values\"])\n return missing_value_df\n\n @staticmethod\n @_InputCheckGeneral._split_and_check_checker\n def split_and_check(database, column_name, test_size=0.3):\n \"\"\"\n General Information\n ----------\n Gets a database and target column, split and return:\n 2 different data sets splitted by the ratio defined in 'test_size' variable\n etch data set will be split to main data and target column\n total 4 variables\n Will also print the shape of every data set.\n\n Parameters\n ----------\n :param database: pandas Data Frame\n data set to fill missing values in.\n\n :param column_name: string\n the name of the target column\n\n :param test_size: float range 0<x<1\n the ratio that the data set will be split by\n\n\n Returns\n -------\n X_train, X_test, y_train, y_test\n print the shape of every data set.\n\n Raises\n ------\n ValueError : If input value not as mentioned above.\n \"\"\"\n\n X = database.drop(column_name, axis=1)\n y = database[column_name]\n X_train, X_test, y_train, y_test = train_test_split(X, y,\n test_size=test_size, random_state=42)\n print(constance_object.SPLIT_AND_CHECK.format(X_train.shape,y_train.shape,\n X_test.shape,y_test.shape))\n return X_train, X_test, y_train, y_test\n\n\n"
] | [
[
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
parasKumarSahu/Knolml-Analysis | [
"01dd401a264289d1aeb793bf49fb9da1c84575ba"
] | [
"Text-Segmentation/start_segmentation.py"
] | [
"import os\nimport word2vec\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_extraction.text import CountVectorizer\nimport re\nimport nltk\nimport matplotlib\nfrom tools import get_penalty, get_segments\nfrom algorithm import split_optimal, split_greedy, get_total\nimport sys\n\n#Main Function\nif len(sys.argv) < 2:\n print(\"Input Format: python3 script_name input_file_name\")\n exit()\n \nbook_path = sys.argv[1]\n\ncorpus_path = './text8' \n\nwrdvec_path = 'wrdvecs-text8.bin'\nif not os.path.exists(wrdvec_path):\n word2vec.word2vec(corpus_path, wrdvec_path, cbow=1, iter_=5, hs=1, threads=4, sample='1e-5', window=15, size=200, binary=1)\n\nmodel = word2vec.load(wrdvec_path)\nwrdvecs = pd.DataFrame(model.vectors, index=model.vocab)\ndel model\nprint(wrdvecs.shape)\n\nnltk.download('punkt')\nsentence_analyzer = nltk.data.load('tokenizers/punkt/english.pickle')\n\nsegment_len = 30 # segment target length in sentences\n\nwith open(book_path, 'rt', encoding=\"utf8\") as f:\n text = f.read().replace('\\n', '¤') # punkt tokenizer handles newlines not so nice\n\nsentenced_text = sentence_analyzer.tokenize(text)\nvecr = CountVectorizer(vocabulary=wrdvecs.index)\n\nsentence_vectors = vecr.transform(sentenced_text).dot(wrdvecs)\n\npenalty = get_penalty([sentence_vectors], segment_len)\nprint('penalty %4.2f' % penalty)\n\noptimal_segmentation = split_optimal(sentence_vectors, penalty, seg_limit=250)\nsegmented_text = get_segments(sentenced_text, optimal_segmentation)\n\nf = open(\"segmentaion_result.csv\", \"w\", encoding=\"utf8\")\nf.write(\"index,text\\n\")\nseg_count = 0\n\nfor s in segmented_text:\n print(\"\\n===========Start of segment===========\\n\")\n print(s[:3], \"...........\", s[-2:-1])\n f.write(str(seg_count)+\",\")\n for ss in s:\n f.write(re.sub('\\W+',' ', ss))\n f.write(\"\\n\") \n print(\"\\n===========End of segment===========\\n\")\n seg_count += 1\n\nf.close()\n\nprint('%d sentences, %d segments, avg %4.2f sentences per segment' % (\n len(sentenced_text), len(segmented_text), len(sentenced_text) / len(segmented_text)))\n\ngreedy_segmentation = split_greedy(sentence_vectors, max_splits=len(optimal_segmentation.splits))\ngreedy_segmented_text = get_segments(sentenced_text, greedy_segmentation)\n\ntotals = [get_total(sentence_vectors, seg.splits, penalty) \n for seg in [optimal_segmentation, greedy_segmentation]]\nprint('optimal score %4.2f, greedy score %4.2f' % tuple(totals))\nprint('ratio of scores %5.4f' % (totals[0] / totals[1]))"
] | [
[
"sklearn.feature_extraction.text.CountVectorizer",
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jordantkohn/ABSA-PyTorch | [
"eece308afb469e319dcef6391535e10d2c498590"
] | [
"test_bayes.py"
] | [
"# -*- coding: utf-8 -*-\n# file: train.py\n# author: songyouwei <[email protected]>\n# Copyright (C) 2018. All Rights Reserved.\n\nimport logging\nimport argparse\nimport math\nimport os\nimport sys\nimport random\nimport numpy\n\nfrom sklearn import metrics\nfrom time import strftime, localtime\n\nfrom transformers import BertModel\n\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader, random_split\n\nfrom data_utils import build_tokenizer, build_embedding_matrix, Tokenizer4Bert, ABSADataset\nfrom models import LSTM, IAN, MemNet, RAM, TD_LSTM, TC_LSTM, Cabasc, ATAE_LSTM, TNet_LF, AOA, MGAN, ASGCN, LCF_BERT\nfrom models import LSTM_BAYES_FC, LSTM_BAYES_RNN\nfrom models import BERT_BAYES_SPC\nfrom models.aen import CrossEntropyLoss_LSR, AEN_BERT\nfrom models.bert_spc import BERT_SPC\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import MaxNLocator\nimport numpy as np\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\nlogger.addHandler(logging.StreamHandler(sys.stdout))\n\n\nclass Instructor:\n def __init__(self, opt):\n self.opt = opt\n\n if 'bert' in opt.model_name:\n tokenizer = Tokenizer4Bert(opt.max_seq_len, opt.pretrained_bert_name)\n bert = BertModel.from_pretrained(opt.pretrained_bert_name)\n self.model = opt.model_class(bert, opt).to(opt.device)\n else:\n tokenizer = build_tokenizer(\n fnames=[opt.dataset_file['train'], opt.dataset_file['test']],\n max_seq_len=opt.max_seq_len,\n dat_fname='{0}_tokenizer.dat'.format(opt.dataset))\n embedding_matrix = build_embedding_matrix(\n word2idx=tokenizer.word2idx,\n embed_dim=opt.embed_dim,\n dat_fname='{0}_{1}_embedding_matrix.dat'.format(str(opt.embed_dim), opt.dataset))\n self.model = opt.model_class(embedding_matrix, opt).to(opt.device)\n\n self.trainset = ABSADataset(opt.dataset_file['train'], tokenizer)\n self.testset = ABSADataset(opt.dataset_file['test'], tokenizer)\n assert 0 <= opt.valset_ratio < 1\n if opt.valset_ratio > 0:\n valset_len = int(len(self.trainset) * opt.valset_ratio)\n self.trainset, self.valset = random_split(self.trainset, (len(self.trainset)-valset_len, valset_len))\n else:\n self.valset = self.testset\n\n if opt.device.type == 'cuda':\n logger.info('cuda memory allocated: {}'.format(torch.cuda.memory_allocated(device=opt.device.index)))\n # self._print_args()\n\n def _print_args(self):\n n_trainable_params, n_nontrainable_params = 0, 0\n for p in self.model.parameters():\n n_params = torch.prod(torch.tensor(p.shape))\n if p.requires_grad:\n n_trainable_params += n_params\n else:\n n_nontrainable_params += n_params\n logger.info('> n_trainable_params: {0}, n_nontrainable_params: {1}'.format(n_trainable_params, n_nontrainable_params))\n logger.info('> training arguments:')\n for arg in vars(self.opt):\n logger.info('>>> {0}: {1}'.format(arg, getattr(self.opt, arg)))\n\n def _reset_params(self):\n for child in self.model.children():\n if type(child) != BertModel: # skip bert params\n for p in child.parameters():\n if p.requires_grad:\n if len(p.shape) > 1:\n self.opt.initializer(p)\n else:\n stdv = 1. 
/ math.sqrt(p.shape[0])\n torch.nn.init.uniform_(p, a=-stdv, b=stdv)\n\n def _test_confidence(self, test_data_loader, n_sample=25):\n n_correct, n_total = 0, 0\n n_correct_not_conf, n_total_not_conf = 0, 0\n n_correct_conf, n_total_conf = 0, 0\n t_targets_all, t_outputs_all = None, None\n correct_pred = []\n # switch model to evaluation mode\n self.model.eval()\n\n with torch.no_grad():\n for i_batch, t_batch in enumerate(test_data_loader):\n t_inputs = [t_batch[col].to(self.opt.device) for col in self.opt.inputs_cols]\n t_targets = t_batch['polarity'].to(self.opt.device)\n # t_outputs = self.model(t_inputs)\n # take 3 outputs per example\n t_outputs_sample = torch.stack([self.model(t_inputs) for i in range(n_sample)])\n t_preds_sample = t_outputs_sample.argmax(axis=2)\n \n # calculate confidence score\n confidence = torch.Tensor([torch.bincount(i).max() for i in t_preds_sample.T]).T\n confidence = confidence/n_sample\n\n # all predicttions\n t_preds = torch.stack([torch.argmax(torch.bincount(i)) for i in t_preds_sample.T])\n\n # only predict if confidence is higher than threshold\n conf_preds = []\n conf_preds_conf = []\n conf_targets = []\n not_conf_preds = []\n not_conf_preds_conf = []\n not_conf_targets = []\n for pred, conf, target in zip(t_preds, confidence, t_targets):\n if conf >= self.opt.pred_threshold:\n conf_preds.append(pred)\n conf_preds_conf.append(conf)\n conf_targets.append(target)\n else:\n not_conf_preds.append(pred)\n not_conf_preds_conf.append(conf)\n not_conf_targets.append(target)\n\n conf_preds = torch.Tensor(conf_preds)\n conf_targets = torch.Tensor(conf_targets)\n\n not_conf_preds = torch.Tensor(not_conf_preds)\n not_conf_targets = torch.Tensor(not_conf_targets)\n\n # keep a list of whether each prediction was correct or not\n correct_pred.append(t_preds == t_targets)\n\n n_correct += (t_preds == t_targets).sum().item()\n n_total += len(t_preds)\n\n n_correct_conf += (conf_preds == conf_targets).sum().item()\n n_total_conf += len(conf_preds)\n\n n_correct_not_conf += (not_conf_preds == not_conf_targets).sum().item()\n n_total_not_conf += len(not_conf_preds)\n\n acc = n_correct / n_total\n\n acc_conf = n_correct_conf / n_total_conf\n percent_conf = n_total_conf / n_total\n\n return acc, acc_conf, percent_conf, correct_pred, confidence\n\n\n def run(self):\n test_data_loader = DataLoader(dataset=self.testset, batch_size=len(self.testset), shuffle=False)\n\n self._reset_params()\n\n saved_model_path = 'state_dict/{}'.format(self.opt.model_statedict)\n self.model.load_state_dict(torch.load(saved_model_path))\n \n acc, acc_conf, percent_conf, correct_pred, confidence = self._test_confidence(test_data_loader)\n logger.info('>> test_acc: {:.4f}, test_acc_conf: {:.4f}, percent_conf: {:4f} '.format(acc, acc_conf, percent_conf))\n\n chart_file = 'graphs/confidence_{}-{}.png'.format(\n self.opt.model_name, strftime(\"%y%m%d-%H%M\", localtime()))\n\n corrects = []\n incorrects = []\n for corr, conf in zip(correct_pred[0],confidence):\n if corr:\n corrects.append(conf.item())\n else:\n incorrects.append(conf.item())\n\n plt.figure(figsize=(8,6))\n plt.title('model confidence for correct and incorrect predictions \\n restaurant test data')\n plt.boxplot([incorrects,corrects], labels=['incorrect', 'correct'], showfliers=True)\n plt.xlabel(\"model prediction\")\n plt.ylabel('model confidence')\n plt.savefig(chart_file)\n\n\ndef main():\n # Hyper Parameters\n parser = argparse.ArgumentParser()\n # parser.add_argument('--model_name', default='bert_spc', type=str)\n 
parser.add_argument('--model_name', default='bert_bayes_spc', type=str)\n # parser.add_argument('--dataset', default='laptop', type=str, help='twitter, restaurant, laptop')\n parser.add_argument('--dataset', default='restaurant', type=str, help='twitter, restaurant, laptop')\n parser.add_argument('--optimizer', default='adam', type=str)\n parser.add_argument('--initializer', default='xavier_uniform_', type=str)\n parser.add_argument('--lr', default=2e-5, type=float, help='try 5e-5, 2e-5 for BERT, 1e-3 for others')\n parser.add_argument('--dropout', default=0.1, type=float)\n parser.add_argument('--l2reg', default=0.01, type=float)\n parser.add_argument('--num_epoch', default=20, type=int, help='try larger number for non-BERT models')\n parser.add_argument('--batch_size', default=16, type=int, help='try 16, 32, 64 for BERT models')\n parser.add_argument('--log_step', default=10, type=int)\n parser.add_argument('--embed_dim', default=300, type=int)\n parser.add_argument('--hidden_dim', default=300, type=int)\n parser.add_argument('--bert_dim', default=768, type=int)\n parser.add_argument('--pretrained_bert_name', default='bert-base-uncased', type=str)\n parser.add_argument('--max_seq_len', default=85, type=int)\n parser.add_argument('--polarities_dim', default=3, type=int)\n parser.add_argument('--hops', default=3, type=int)\n parser.add_argument('--patience', default=5, type=int)\n parser.add_argument('--device', default=None, type=str, help='e.g. cuda:0')\n parser.add_argument('--seed', default=1234, type=int, help='set seed for reproducibility')\n parser.add_argument('--valset_ratio', default=0, type=float, help='set ratio between 0 and 1 for validation support')\n # The following parameters are only valid for the lcf-bert model\n parser.add_argument('--local_context_focus', default='cdm', type=str, help='local context focus mode, cdw or cdm')\n parser.add_argument('--SRD', default=3, type=int, help='semantic-relative-distance, see the paper of LCF-BERT model')\n parser.add_argument('--model_statedict', default='lstm_bayes_fc_restaurant_val_acc_0.7607', type=str)\n parser.add_argument('--pred_threshold', default=0.66, type=float, help='between 0 and 1')\n opt = parser.parse_args()\n\n if opt.seed is not None:\n random.seed(opt.seed)\n numpy.random.seed(opt.seed)\n torch.manual_seed(opt.seed)\n torch.cuda.manual_seed(opt.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n os.environ['PYTHONHASHSEED'] = str(opt.seed)\n\n model_classes = {\n 'lstm': LSTM,\n 'lstm_bayes_fc': LSTM_BAYES_FC,\n 'lstm_bayes_rnn': LSTM_BAYES_RNN,\n 'td_lstm': TD_LSTM,\n 'tc_lstm': TC_LSTM,\n 'atae_lstm': ATAE_LSTM,\n 'ian': IAN,\n 'memnet': MemNet,\n 'ram': RAM,\n 'cabasc': Cabasc,\n 'tnet_lf': TNet_LF,\n 'aoa': AOA,\n 'mgan': MGAN,\n 'asgcn': ASGCN,\n 'bert_spc': BERT_SPC,\n 'aen_bert': AEN_BERT,\n 'lcf_bert': LCF_BERT,\n 'bert_bayes_spc': BERT_BAYES_SPC,\n # default hyper-parameters for LCF-BERT model is as follws:\n # lr: 2e-5\n # l2: 1e-5\n # batch size: 16\n # num epochs: 5\n }\n dataset_files = {\n 'twitter': {\n 'train': './datasets/acl-14-short-data/train.raw',\n 'test': './datasets/acl-14-short-data/test.raw'\n },\n 'restaurant': {\n 'train': './datasets/semeval14/Restaurants_Train.xml.seg',\n 'test': './datasets/semeval14/Restaurants_Test_Gold.xml.seg'\n },\n 'laptop': {\n 'train': './datasets/semeval14/Laptops_Train.xml.seg',\n 'test': './datasets/semeval14/Laptops_Test_Gold.xml.seg'\n }\n }\n input_colses = {\n 'lstm': ['text_indices'],\n 'lstm_bayes_fc': 
['text_indices', 'aspect_indices'],\n 'lstm_bayes_rnn':['text_indices', 'aspect_indices'],\n 'td_lstm': ['left_with_aspect_indices', 'right_with_aspect_indices'],\n 'tc_lstm': ['left_with_aspect_indices', 'right_with_aspect_indices', 'aspect_indices'],\n 'atae_lstm': ['text_indices', 'aspect_indices'],\n 'ian': ['text_indices', 'aspect_indices'],\n 'memnet': ['context_indices', 'aspect_indices'],\n 'ram': ['text_indices', 'aspect_indices', 'left_indices'],\n 'cabasc': ['text_indices', 'aspect_indices', 'left_with_aspect_indices', 'right_with_aspect_indices'],\n 'tnet_lf': ['text_indices', 'aspect_indices', 'aspect_boundary'],\n 'aoa': ['text_indices', 'aspect_indices'],\n 'mgan': ['text_indices', 'aspect_indices', 'left_indices'],\n 'asgcn': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],\n 'bert_spc': ['concat_bert_indices', 'concat_segments_indices'],\n 'bert_bayes_spc': ['concat_bert_indices', 'concat_segments_indices'],\n 'aen_bert': ['text_bert_indices', 'aspect_bert_indices'],\n 'lcf_bert': ['concat_bert_indices', 'concat_segments_indices', 'text_bert_indices', 'aspect_bert_indices'],\n }\n initializers = {\n 'xavier_uniform_': torch.nn.init.xavier_uniform_,\n 'xavier_normal_': torch.nn.init.xavier_normal_,\n 'orthogonal_': torch.nn.init.orthogonal_,\n }\n optimizers = {\n 'adadelta': torch.optim.Adadelta, # default lr=1.0\n 'adagrad': torch.optim.Adagrad, # default lr=0.01\n 'adam': torch.optim.Adam, # default lr=0.001\n 'adamax': torch.optim.Adamax, # default lr=0.002\n 'asgd': torch.optim.ASGD, # default lr=0.01\n 'rmsprop': torch.optim.RMSprop, # default lr=0.01\n 'sgd': torch.optim.SGD,\n }\n opt.model_class = model_classes[opt.model_name]\n opt.dataset_file = dataset_files[opt.dataset]\n opt.inputs_cols = input_colses[opt.model_name]\n opt.initializer = initializers[opt.initializer]\n opt.optimizer = optimizers[opt.optimizer]\n opt.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') \\\n if opt.device is None else torch.device(opt.device)\n\n ins = Instructor(opt)\n ins.run()\n\n\nif __name__ == '__main__':\n main()\n"
] | [
[
"matplotlib.pyplot.boxplot",
"torch.nn.init.uniform_",
"matplotlib.pyplot.title",
"numpy.random.seed",
"torch.cuda.manual_seed",
"matplotlib.pyplot.figure",
"torch.manual_seed",
"torch.load",
"torch.Tensor",
"matplotlib.pyplot.savefig",
"torch.tensor",
"torch.no_grad",
"torch.cuda.is_available",
"torch.device",
"matplotlib.pyplot.xlabel",
"torch.bincount",
"torch.cuda.memory_allocated",
"matplotlib.pyplot.ylabel"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Praznat/annotationmodeling | [
"014b8b94b2225f947691c18b26eb8a4b148d2c8a"
] | [
"sim_keypoints.py"
] | [
"import json\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport simulation\nfrom eval_functions import oks_score_multi\nimport utils\n\ndef alter_location(points, x_offset, y_offset):\n x, y = points.T\n return np.array([x + x_offset, y + y_offset]).T\n\ndef alter_rotation(points, radians):\n centroid = np.mean(points, axis=0)\n return utils.rotate_via_numpy((points - centroid).T, radians) + centroid\n\ndef alter_magnitude(points, percent_diff):\n centroid = np.mean(points, axis=0)\n return (points - centroid) * np.exp(percent_diff) + centroid\n\ndef alter_normal_jump(points, scale):\n return points + np.random.normal(0, scale, points.shape)\n\ndef alter_cauchy_jump(points, scale, abs_bound):\n return points + utils.bounded_cauchy(scale, points.shape, abs_bound)\n\ndef disappear(points, p_disappear):\n return None if np.random.uniform() < p_disappear else points\n\ndef shift_by_uerr(annotation, uerr):\n shifts = [\n alter_rotation(annotation, np.random.normal(0, 0.5 * uerr) * np.pi / 8),\n alter_magnitude(annotation, np.random.normal(0, 0.3 * uerr)),\n alter_normal_jump(annotation, 30 * uerr),\n alter_cauchy_jump(annotation, 30 * uerr, 100),\n ]\n return np.mean(shifts, axis=0) * np.abs(np.sign(annotation))\n\ndef create_user_data(uid, df, pct_items, u_err, difficulty_dict=None, extraarg=None):\n items = df[\"item\"].unique()\n n_items_labeled = int(np.round(pct_items * len(items)))\n items_labeled = sorted(np.random.choice(items, n_items_labeled, replace=False))\n labels = []\n for item in items_labeled:\n gold = df[df[\"item\"] == item][\"gold\"].values[0]\n shifted_kpobjs = [shift_by_uerr(kpobj, u_err) for kpobj in gold]\n kpobjs = [shifted_kpobjs[0]] + [disappear(kp, u_err / 2) for kp in shifted_kpobjs[1:]]\n kpobjs = [kp for kp in kpobjs if kp is not None]\n labels.append(kpobjs)\n dfdict = {\n \"uid\": [uid] * len(items_labeled),\n \"item\": items_labeled,\n \"annotation\": labels,\n }\n return pd.DataFrame(dfdict)\n\nclass KeypointSimulator(simulation.Simulator):\n def __init__(self, rawdata_dir='data/coco/person_keypoints_train2017.json', max_items=500, minlabelsperitem=4):\n with open(rawdata_dir) as f:\n dataset = json.load(f)\n self.category_id_skeletons = {c[\"id\"]: np.array(c[\"skeleton\"])-1 for c in iter(dataset[\"categories\"])}\n \n img_label = {}\n for dataset_annotation in iter(dataset[\"annotations\"]):\n v = img_label.setdefault(dataset_annotation[\"image_id\"], [])\n v.append(dataset_annotation)\n img_label_minlen = {k: v for k, v in img_label.items() if len(v) >= minlabelsperitem} \n \n i = 0\n rows = []\n item = []\n annotation = []\n category = []\n for dataset_annotations in iter(img_label_minlen.values()):\n for dataset_annotation in dataset_annotations:\n kp = np.reshape(dataset_annotation[\"keypoints\"], (-1,3))\n kp = kp[kp[:,2]>-90][:,:2]\n if len(kp) == 0:\n continue\n item.append(dataset_annotation[\"image_id\"])\n annotation.append(kp)\n category.append(dataset_annotation[\"category_id\"])\n i += 1\n if i > max_items:\n break\n kp_df = pd.DataFrame({\"item\":item, \"gold\":annotation, \"category\":category})\n self.df = kp_df.groupby(\"item\")[\"gold\"].apply(list).reset_index()\n self.itemdict = utils.make_categorical(self.df, \"item\")\n\n def create_stan_data(self, n_users, pct_items, err_rates, difficulty_dict):\n self.err_rates = err_rates\n self.difficulty_dict = difficulty_dict\n self.sim_df = simulation.create_sim_df(create_user_data, self.df, n_users, pct_items, err_rates, difficulty_dict)\n stan_data = 
utils.calc_distances(self.sim_df, (lambda x,y: 1 - oks_score_multi(x, y)), label_colname=\"annotation\", item_colname=\"item\")\n return stan_data\n\n def sim_uerr_fn(self, uerr_a, uerr_b, n_users):\n z = np.abs(np.random.normal(uerr_a, uerr_b, 10000))\n return np.quantile(z, np.linspace(0,1,n_users+2)[1:-1])\n \n def sim_diff_fn(self, difficulty_a, difficulty_b):\n z = 1 * np.random.beta(difficulty_a, difficulty_b, 10000)\n n_items = len(self.df[\"item\"].unique())\n return dict(zip(np.arange(n_items), np.quantile(z, np.linspace(0,1,n_items+2)[1:-1])))"
] | [
[
"numpy.random.beta",
"numpy.linspace",
"numpy.random.choice",
"numpy.reshape",
"numpy.arange",
"pandas.DataFrame",
"numpy.sign",
"numpy.random.normal",
"numpy.mean",
"numpy.exp",
"numpy.random.uniform",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kennethjwalker/pyglet_game_engine | [
"1b8111f72669afa759f729babecf44f4d0c3dc9a"
] | [
"src/testmap.py"
] | [
"import pandas as pd\r\n\r\n\r\n\r\nTILESET = {\r\n 'stone_brick_1': 0,\r\n 'closed_door': 1,\r\n 'player': 2,\r\n 'rect_gray_0_old': 3,\r\n 'shaft': 4,\r\n}\r\n\r\nGROUNDMAP = []\r\nENVMAP = []\r\nOBJMAP = []\r\n\r\ndef create_testmap():\r\n ground_file = r'resources/tiled_files/maps/csv/first_steps_ground_1.csv'\r\n env_file = r'resources/tiled_files/maps/csv/first_steps_env_1.csv'\r\n obj_file = r'resources/tiled_files/maps/csv/first_steps_obj_1.csv'\r\n ground_df = pd.read_csv(ground_file, header=None)\r\n env_df = pd.read_csv(env_file, header=None)\r\n obj_df = pd.read_csv(obj_file, header=None)\r\n\r\n # y data needs to be ready in backwards do to pyglet drawing from btm-left\r\n if(ground_df.size > 0):\r\n for x in range(len(ground_df)):\r\n col = []\r\n for y in range(len(ground_df[x])):\r\n col.append(ground_df[x][len(ground_df[x])-y-1])\r\n GROUNDMAP.append(col)\r\n\r\n if(env_df.size > 0):\r\n for x in range(len(env_df)):\r\n col = []\r\n for y in range(len(env_df[x])):\r\n col.append(env_df[x][len(env_df[x])-y-1])\r\n ENVMAP.append(col)\r\n\r\n if(obj_df.size > 0):\r\n for x in range(len(obj_df)):\r\n col = []\r\n for y in range(len(obj_df[x])):\r\n col.append(obj_df[x][len(obj_df[x])-y-1])\r\n OBJMAP.append(col)\r\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
cothan/binary-samples | [
"33a682e02cefb3d2a73ef34441f193f17c91df27"
] | [
"TRAIN_DATA/training_keras.py"
] | [
"#!/usr/bin/env python3\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout\nfrom keras.layers import Embedding\nfrom keras.layers import LSTM\nfrom keras.preprocessing.text import Tokenizer\nimport numpy as np \nfrom sklearn.model_selection import train_test_split\n# from keras.utils import plot_model\nfrom keras.utils import to_categorical\n\nfrom keras.preprocessing import sequence\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional\n\n\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder, StandardScaler\n\nmax_features = 20\nseq_length = 8\nbatch_size = 32\n\n\narchs = {'aarch64-rp3': 0,\n'alphaev56': 1,\n'alphaev67': 2,\n'armv8-rp3': 3,\n'avr': 4,\n'mips': 5,\n'mips64el': 6,\n'mipsel': 7,\n'nios2': 8,\n'powerpc': 9 ,\n'powerpc64': 10,\n'powerpc64le': 11,\n'riscv64': 12,\n's390': 13,\n's390x-64': 14,\n 'sh': 15,\n'sparc': 16,\n'sparc64': 17,\n'x86_64-ubuntu18.04-linux-gnu': 18,\n'xtensa': 19}\n\ndef pad(seq):\n return seq + [0]*(seq_length - len(seq))\n\ndef process(f):\n data = []\n labels = []\n longdata = f.read().strip().split('\\n')\n _bool = False\n for i in longdata:\n temp = i.strip().split(' ')\n label = temp[:1][0]\n t = temp[1:]\n \n t = list(map(lambda x: int(x, 16), t))\n\n for start in range(0, len(t), seq_length):\n # print(start, start + seq_length)\n temp = t[start: start + seq_length]\n if temp == []:\n break \n if len(temp) < seq_length:\n temp = pad(temp)\n\n assert(len(temp) == seq_length)\n data.append(temp)\n labels.append(archs[label])\n\n # print(data[:10])\n # print(labels[:10])\n # print(len(data), len(labels))\n return labels, data \n\ndef read_train_data():\n train_labels = []\n train_data = []\n _bool = False\n for file in archs:\n # print(file)\n with open('{}.train'.format(file), 'r') as f:\n labels, data = process(f)\n train_data += data\n train_labels += labels\n # train_data.append(data)\n # train_labels.append(labels)\n\n print(len(data), len(labels), '============')\n # print(train_data[:1])\n # print(train_labels[:1])\n return train_data, train_labels\n\ntrain_data, train_label = read_train_data()\n\nassert( len(train_data) == len(train_label) )\nprint(len(train_label), len(train_data))\n\n\ntrain_data = np.array(train_data)\ntrain_label = np.array(train_label)\n\nprint(train_label.shape, train_data.shape)\nprint(train_data[:3])\nprint(train_label[:3])\n# train_label = to_categorical(train_label, num_classes=None)\n\nx_train, x_test, y_train, y_test = train_test_split(train_data, train_label, test_size=0.2) \n\nprint('Train size: {} {}'.format(len(x_train), len(y_train)))\n\nprint('Test size: {} {}'.format(len(x_test), len(y_test)))\n\nmodel = Sequential()\n"
] | [
[
"numpy.array",
"sklearn.model_selection.train_test_split"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
GuangYang98/ISTN-Custom | [
"9a1fb7163b5d180a5cb26f9bb835383261ac92fc"
] | [
"data/big_mess_300300/messUpImagesAndMasks.py"
] | [
"import cv2\nimport scipy.ndimage as nd\nimport random\n\nimport numpy as np\nfrom PIL import Image\nimport pickle\nimport os\n\nos.mkdir(\"Test_Images\")\nos.mkdir(\"Test_Masks\")\nos.mkdir(\"Train_Images\")\nos.mkdir(\"Train_Masks\")\n#\nos.mkdir(\"Test_Images_Unaligned\")\nos.mkdir(\"Test_Masks_Unaligned\")\nos.mkdir(\"Train_Images_Unaligned\")\nos.mkdir(\"Train_Masks_Unaligned\")\n\nrotations = {}\nrotations['Test_Rotations'] = []\nrotations['Train_Rotations'] = []\n\n\nIMAGESHAPE = (384, 512)\nPADSHAPE = ((64,64), (128,128), (0,0))\nMAXANGLE = 60\n\nINPUT_IMAGES_FOLDER = \"Images\"\nINPUT_MASKS_FOLDER = \"Masks\"\n\ndef reShape(name, img, angle, blur):\n print(\"reshaped: \" + name)\n res = cv2.resize(img, dsize=IMAGESHAPE, interpolation=cv2.INTER_AREA)\n pad = np.copy(np.pad(res, PADSHAPE, mode='constant', constant_values=0))\n rot = nd.rotate(pad, angle, reshape=False)\n if (blur == True):\n rot = cv2.blur(rot, (300, 300))\n return rot\n\ndef resizeAndPad(img, blur):\n res = cv2.resize(img, dsize=IMAGESHAPE, interpolation=cv2.INTER_AREA)\n pad = np.copy(np.pad(res, PADSHAPE, mode='constant', constant_values=0))\n if (blur == True):\n pad = cv2.blur(pad, (300, 300))\n return pad\n\ndef zeroPadName(i):\n x = \"\"\n if (i < 1000):\n x = x + \"0\"\n if (i < 100):\n x = x + \"0\"\n if (i < 10):\n x = x + \"0\"\n x = x + str(i)\n return x\n\ndef readReshapeOutputImage(inputFolder, normalOutputFolder, rotatedOutputFolder, inputImgName, angle, numImg, flag):\n inputPath = os.path.join(inputFolder, inputImgName)\n inputImg = cv2.imread(inputPath)\n reshapedImg = reShape(inputPath, inputImg, angle, flag)\n\n\n normalOutputImg = Image.fromarray(resizeAndPad(inputImg, flag))\n outputImgName = str(numImg) + \".png\"\n outputPath = os.path.join(normalOutputFolder, outputImgName)\n print(\"to: \" + outputPath)\n normalOutputImg.save(outputPath)\n\n rotatedOutputImg = Image.fromarray(reshapedImg)\n outputImgName = str(numImg) + \".png\"\n outputPath = os.path.join(rotatedOutputFolder, outputImgName)\n print(\"and: \" + outputPath)\n rotatedOutputImg.save(outputPath)\n\n\nfor i in range(0, 110):\n index = i\n angle = MAXANGLE * random.random()\n\n rotations['Train_Rotations'].append(angle)\n\n inputFolder = INPUT_IMAGES_FOLDER\n normalOutputFolder = \"Train_Images\"\n rotatedOutputFolder = \"Train_Images_Unaligned\"\n inputImgName = \"mask\" + zeroPadName(i) + \".png\"\n readReshapeOutputImage(inputFolder, normalOutputFolder, rotatedOutputFolder, inputImgName, angle, index, False) #知道顺序然后顺着读\n\n inputFolder = INPUT_MASKS_FOLDER\n normalOutputFolder = \"Train_Masks\"\n rotatedOutputFolder = \"Train_Masks_Unaligned\"\n inputImgName = str(i) + \".png\"\n readReshapeOutputImage(inputFolder, normalOutputFolder, rotatedOutputFolder, inputImgName, angle, index, True)\n\nfor i in range(110, 165):\n index = i % 110\n angle = MAXANGLE * random.random()\n\n rotations['Test_Rotations'].append(angle)\n\n inputFolder = INPUT_IMAGES_FOLDER\n normalOutputFolder = \"Test_Images\"\n rotatedOutputFolder = \"Test_Images_Unaligned\"\n inputImgName = \"mask\" + zeroPadName(i) + \".png\"\n readReshapeOutputImage(inputFolder, normalOutputFolder, rotatedOutputFolder, inputImgName, angle, index, False)\n\n inputFolder = INPUT_MASKS_FOLDER\n normalOutputFolder = \"Test_Masks\"\n rotatedOutputFolder = \"Test_Masks_Unaligned\"\n inputImgName = str(i) + \".png\"\n readReshapeOutputImage(inputFolder, normalOutputFolder, rotatedOutputFolder, inputImgName, angle, index, True)\n\n\npickle.dump(rotations, open('rotations.dat', 
\"wb\"))\n"
] | [
[
"numpy.pad",
"scipy.ndimage.rotate"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
jalexvig/anki_imager | [
"2cb8b54ccf92a35779e877ea8754828334bd7780"
] | [
"anki_imager/add_images.py"
] | [
"import os\nimport os.path as osp\nimport shutil\n\nimport pandas as pd\nfrom google_images_download import google_images_download\n\n# SETTINGS\nDPATH_ANKI = osp.join(osp.expanduser('~'), '.local/share/Anki2/User 1/collection.media/')\nDNAME_IMAGES = 'downloads'\nREQUEST_DELAY = 0\n\n\ndef process_notes_file(fpath,\n dpath_anki_media=DPATH_ANKI,\n num_imgs=1,\n resume=True,\n num_words=None,\n tags=False,\n suffix='_mod'):\n\n df = _read_notes_file(fpath)\n\n download_images(df, num_words, resume, num_imgs)\n\n _process_images(df, dpath_anki_media)\n\n _save_updates_notes(df, fpath, tags, suffix)\n\n\ndef _save_updates_notes(df,\n fpath,\n tags,\n suffix):\n\n fpath_mod = suffix.join(osp.splitext(fpath))\n\n if tags:\n cols = df.columns.tolist()\n cols = cols[:-2] + cols[:-3:-1]\n df = df[cols]\n\n df.to_csv(fpath_mod, sep='\\t', header=False)\n\n\ndef _read_notes_file(fpath):\n\n df = pd.read_csv(fpath, header=None, sep='\\t').set_index(0)\n\n return df\n\n\ndef _process_images(df,\n dpath_anki):\n\n df['imgs'] = None\n\n for root, dnames, fnames in os.walk(DNAME_IMAGES):\n\n if osp.split(root)[-1] == DNAME_IMAGES:\n continue\n\n fnames_new = []\n word = osp.split(root)[-1]\n for i, fname in enumerate(fnames):\n fext = osp.splitext(fname)[-1]\n fname_word_new = 'vocab_{}_{}{}'.format(word, i, fext)\n shutil.copy2(osp.join(root, fname), osp.join(dpath_anki, fname_word_new))\n fnames_new.append(fname_word_new)\n\n img_str = '<br><br>'.join([\"<img src='{}'/>\".format(fname) for fname in fnames_new])\n df.loc[word, 'imgs'] = img_str\n\n\ndef download_images(df,\n num_words,\n resume,\n num_imgs):\n\n paths_dict = {}\n\n for word in df.index[:num_words]:\n\n # Skip previously downloaded\n if resume and word in os.listdir(DNAME_IMAGES) and os.listdir(osp.join(DNAME_IMAGES, word)):\n continue\n\n response = google_images_download.googleimagesdownload()\n\n # google_images_download uses csv for separate searches so replace with spaces\n word_ = word.replace(',', ' ')\n kwargs = {\"keywords\": word_, \"limit\": num_imgs, 'delay': REQUEST_DELAY, 'image_directory': word}\n\n paths = response.download(kwargs)\n paths_dict.update(paths)\n\n return paths_dict\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
b01901143/DeepMatchVO | [
"c90481365774fe6fb46008ba129adfeff79fb855"
] | [
"data/cityscapes/cityscapes_loader.py"
] | [
"from __future__ import division\nimport json\nimport os, sys\nimport numpy as np\nimport scipy.misc\nfrom glob import glob\n\nCURDIR = os.path.dirname(__file__)\nsys.path.append(os.path.abspath(os.path.join(CURDIR, '..')))\nsys.path.append(os.path.abspath(os.path.join(CURDIR, '...')))\nfrom geo_utils import scale_intrinsics\nfrom common_utils import load_match_func\n\nclass cityscapes_loader(object):\n def __init__(self, \n dataset_dir,\n split='train',\n match_num=0,\n crop_bottom=True, # Get rid of the car logo\n sample_gap=2, # Sample every two frames to match KITTI frame rate\n img_height=171, \n img_width=416,\n seq_length=5):\n self.dataset_dir = dataset_dir\n self.split = split\n self.match_num = match_num\n # Crop out the bottom 25% of the image to remove the car logo\n self.crop_bottom = crop_bottom\n self.sample_gap = sample_gap\n self.img_height = img_height\n self.img_width = img_width\n self.seq_length = seq_length\n assert seq_length % 2 != 0, 'seq_length must be odd!'\n self.frames = self.collect_frames(split)\n self.num_frames = len(self.frames)\n if split == 'train':\n self.num_train = self.num_frames\n else:\n self.num_test = self.num_frames\n print('Total frames collected: %d' % self.num_frames)\n \n def collect_frames(self, split):\n img_dir = self.dataset_dir + '/leftImg8bit_sequence/' + split + '/'\n city_list = os.listdir(img_dir)\n frames = []\n for city in city_list:\n img_files = glob(img_dir + city + '/*.png')\n for f in img_files:\n frame_id = os.path.basename(f).split('leftImg8bit')[0]\n frames.append(frame_id)\n return frames\n\n def get_train_example_with_idx(self, tgt_idx):\n tgt_frame_id = self.frames[tgt_idx]\n if not self.is_valid_example(tgt_frame_id):\n return False\n example = self.load_example(self.frames[tgt_idx], load_matches=(self.match_num!=0))\n return example\n\n def load_intrinsics(self, frame_id, split):\n city, seq, _, _ = frame_id.split('_')\n camera_file = os.path.join(self.dataset_dir, 'camera',\n split, city, city + '_' + seq + '_*_camera.json')\n camera_file = glob(camera_file)[0]\n with open(camera_file, 'r') as f: \n camera = json.load(f)\n fx = camera['intrinsic']['fx']\n fy = camera['intrinsic']['fy']\n u0 = camera['intrinsic']['u0']\n v0 = camera['intrinsic']['v0']\n intrinsics = np.array([[fx, 0, u0],\n [0, fy, v0],\n [0, 0, 1]])\n return intrinsics\n\n def is_valid_example(self, tgt_frame_id):\n city, snippet_id, tgt_local_frame_id, _ = tgt_frame_id.split('_')\n half_offset = int((self.seq_length - 1)/2 * self.sample_gap)\n for o in range(-half_offset, half_offset + 1, self.sample_gap):\n curr_local_frame_id = '%.6d' % (int(tgt_local_frame_id) + o)\n curr_frame_id = '%s_%s_%s_' % (city, snippet_id, curr_local_frame_id)\n curr_image_file = os.path.join(self.dataset_dir, 'leftImg8bit_sequence', \n self.split, city, curr_frame_id + 'leftImg8bit.png')\n if not os.path.exists(curr_image_file):\n return False\n return True\n\n def load_image_sequence(self, tgt_frame_id, seq_length, crop_bottom):\n city, snippet_id, tgt_local_frame_id, _ = tgt_frame_id.split('_')\n half_offset = int((self.seq_length - 1)/2 * self.sample_gap)\n image_seq = []\n for o in range(-half_offset, half_offset + 1, self.sample_gap):\n curr_local_frame_id = '%.6d' % (int(tgt_local_frame_id) + o)\n curr_frame_id = '%s_%s_%s_' % (city, snippet_id, curr_local_frame_id)\n curr_image_file = os.path.join(self.dataset_dir, 'leftImg8bit_sequence', \n self.split, city, curr_frame_id + 'leftImg8bit.png')\n curr_img = scipy.misc.imread(curr_image_file)\n raw_shape = 
np.copy(curr_img.shape)\n if o == 0:\n zoom_y = self.img_height/raw_shape[0]\n zoom_x = self.img_width/raw_shape[1]\n curr_img = scipy.misc.imresize(curr_img, (self.img_height, self.img_width))\n if crop_bottom:\n ymax = int(curr_img.shape[0] * 0.75)\n curr_img = curr_img[:ymax]\n image_seq.append(curr_img)\n return image_seq, zoom_x, zoom_y\n \n def load_example(self, tgt_frame_id, load_gt_pose=False, load_matches=False):\n image_seq, zoom_x, zoom_y = self.load_image_sequence(tgt_frame_id, self.seq_length, self.crop_bottom)\n intrinsics = self.load_intrinsics(tgt_frame_id, self.split)\n intrinsics = scale_intrinsics(intrinsics, zoom_x, zoom_y)\n example = {}\n example['intrinsics'] = intrinsics\n example['image_seq'] = image_seq\n example['folder_name'] = tgt_frame_id.split('_')[0]\n example['file_name'] = tgt_frame_id[:-1]\n if load_matches:\n city, snippet_id, tgt_local_frame_id, _ = tgt_frame_id.split('_')\n tgt_frame_id = tgt_frame_id + 'leftImg8bit'\n sift_folder = os.path.join(self.dataset_dir, 'leftImg8bit_sequence', self.split, city, 'sift')\n match_folder = os.path.join(self.dataset_dir, 'leftImg8bit_sequence', self.split, city, 'match')\n matches = load_match_func(sift_folder, match_folder, tgt_frame_id, zoom_x, zoom_y, self.seq_length)\n example['match'] = matches\n return example"
] | [
[
"numpy.copy",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mmaher22/Instance-based-label-smoothing | [
"015ec25ffdd9a9c3d8c9d1deb373d1ab113cb443"
] | [
"models/resnet_sd.py"
] | [
"import torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nimport torch\nimport torch.nn.functional as F\nfrom torchvision.models.resnet import conv3x3\nimport math\n\n__all__ = ['ResNet_SD', 'resnet18_SD', 'resnet34_SD', 'resnet50_SD', 'resnet101_SD',\n 'resnet152_SD']\n\n\nmodel_urls = {\n 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n}\n\n\nclass BasicBlockWithDeathRate(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, death_rate=0.,\n downsample=None):\n super(BasicBlockWithDeathRate, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu1 = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.relu2 = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n self.death_rate = death_rate\n\n def forward(self, x):\n residual = x\n if self.downsample is not None:\n x = self.downsample(x)\n # TODO: fix the bug of original Stochatic depth\n if not self.training or torch.rand(1)[0] >= self.death_rate:\n residual = self.conv1(residual)\n residual = self.bn1(residual)\n residual = self.relu1(residual)\n residual = self.conv2(residual)\n residual = self.bn2(residual)\n if self.training:\n residual /= (1. - self.death_rate)\n x = x + residual\n x = self.relu2(x)\n\n return x\n\nclass DownsampleB(nn.Module):\n\n def __init__(self, nIn, nOut, stride):\n super(DownsampleB, self).__init__()\n self.avg = nn.AvgPool2d(stride)\n self.expand_ratio = nOut // nIn\n\n def forward(self, x):\n x = self.avg(x)\n return torch.cat([x] + [x.mul(0)] * (self.expand_ratio - 1), 1)\n\n\nclass ResNetCifar(nn.Module):\n '''Small ResNet for CIFAR & SVHN\n death_rates: death_rates of each block except for the first and\n the last block\n '''\n\n def __init__(self, depth, death_rates=None, block=BasicBlockWithDeathRate, num_classes=10, in_channels = 3):\n assert (depth - 2) % 6 == 0, 'depth should be one of 6N+2'\n super(ResNetCifar, self).__init__()\n n = (depth - 2) // 6\n assert death_rates is None or len(death_rates) == in_channels * n\n if death_rates is None:\n death_rates = [0.] * (in_channels * n)\n self.inplanes = 16\n self.conv1 = nn.Conv2d(in_channels, 16, kernel_size=3, stride=1, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(16)\n self.relu = nn.ReLU(inplace=True)\n self.layer1 = self._make_layer(block, 16, death_rates[:n])\n self.layer2 = self._make_layer(block, 32, death_rates[n:2 * n], stride=2)\n self.layer3 = self._make_layer(block, 64, death_rates[2 * n:], stride=2)\n self.avgpool = nn.AvgPool2d(8)\n self.fc = nn.Linear(64 * block.expansion, num_classes)\n self.temperature = torch.nn.Parameter(torch.ones(1), requires_grad=False)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, death_rates, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = DownsampleB(self.inplanes, planes * block.expansion, stride)\n\n layers = [block(self.inplanes, planes, stride, downsample=downsample,\n death_rate=death_rates[0])]\n self.inplanes = planes * block.expansion\n for death_rate in death_rates[1:]:\n layers.append(block(self.inplanes, planes, death_rate=death_rate))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n\n x = self.avgpool(x)\n x = x.view(x.size(0), -1)\n x = self.fc(x)\n x /= self.temperature\n return x\n\n\ndef resnet110_SD(pretrained=False, num_classes = 1000, in_channels = 3, **kwargs):\n \"\"\"Constructs a ResNet_SD-110 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n death_rate = 0.5; death_mode = 'linear'; depth = 110;\n assert (depth - 2) % 6 == 0, 'depth should be one of 6N+2'\n nblocks = (depth - 2) // 2\n if death_mode == 'uniform':\n death_rates = [death_rate] * nblocks\n elif death_mode == 'linear':\n death_rates = [float(i + 1) * death_rate / float(nblocks) for i in range(nblocks)]\n else:\n death_rates = None\n return ResNetCifar(depth, death_rates, BasicBlockWithDeathRate, num_classes, in_channels)\n\n\ndef resnet152_SD(pretrained=False, num_classes = 1000, in_channels = 3, **kwargs):\n \"\"\"Constructs a ResNet_SD-110 model.\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n \"\"\"\n death_rate = 0.5; death_mode = 'linear'; depth = 152;\n assert (depth - 2) % 6 == 0, 'depth should be one of 6N+2'\n nblocks = (depth - 2) // 2\n if death_mode == 'uniform':\n death_rates = [death_rate] * nblocks\n elif death_mode == 'linear':\n death_rates = [float(i + 1) * death_rate / float(nblocks) for i in range(nblocks)]\n else:\n death_rates = None\n return ResNetCifar(depth, death_rates, BasicBlockWithDeathRate, num_classes, in_channels)"
] | [
[
"torch.nn.Sequential",
"torch.ones",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.AvgPool2d",
"torch.rand",
"torch.nn.BatchNorm2d",
"torch.nn.ReLU"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liyu711/SUAS | [
"2f6592fc2ab316475eeabe2f4828e5ba5c1a4b0b"
] | [
"tests/unit_tests/test_sda.py"
] | [
"import unittest\nimport numpy as np\nfrom SDA import *\n\nclass SDATestCase(unittest.TestCase):\n\n def setUp(self):\n self.obstacle_map = ObstacleMap(np.array([0,0,0]), np.array([np.array([[-2000, -2000], [-2000, 2000], [2000, 2000], [2000, -2000]])]))\n\n def test_add_obstacles(self):\n \"\"\"\n Test ObstacleMap's add_obstacle() method\n \"\"\"\n new_obstacle = StationaryObstacle(np.array([0, 10, 0]), 5, 20)\n\n self.obstacle_map.add_obstacle(new_obstacle)\n\n self.assertEqual(self.obstacle_map.get_obstacles().size, 1)\n\n def test_reset_obstacles(self):\n \"\"\"\n Test ObstacleMap's reset_obstacles() method\n \"\"\"\n new_obstacle = StationaryObstacle(np.array([0, 10, 0]), 5, 20)\n\n self.obstacle_map.add_obstacle(new_obstacle)\n self.obstacle_map.reset_obstacles()\n\n self.assertEqual(self.obstacle_map.get_obstacles().size, 0)\n\n def test_reset_waypoints(self):\n \"\"\"\n Test ObstacleMap's reset_waypoints() method\n \"\"\"\n self.obstacle_map.reset_obstacles()\n self.obstacle_map.reset_obstacles()\n self.obstacle_map.set_drone_position(np.array([0,0,0]))\n waypoint = np.array([20, 0, 0])\n\n self.obstacle_map.add_waypoint(waypoint)\n\n self.obstacle_map.reset_waypoints()\n self.assertEqual(len(self.obstacle_map.get_drone().get_waypoint_holder()), 0)\n\n def test_obstacle_in_path_detection_false(self):\n \"\"\"\n Test ObstacleMap's ability to determine if obstacles intersect with\n waypoint path. This test includes an obstacle that is not in the path\n of the UAV.\n \"\"\"\n self.obstacle_map.reset_obstacles()\n self.obstacle_map.reset_waypoints()\n self.obstacle_map.set_drone_position(np.array([0,0,0]))\n obstacle_in_path = StationaryObstacle(np.array([50, 0, 0]), 5, 20)\n waypoint = np.array([50, 50, 0])\n\n self.obstacle_map.add_obstacle(obstacle_in_path)\n self.obstacle_map.add_waypoint(waypoint)\n\n obstacle_in_path_boolean, avoid_paths = self.obstacle_map.is_obstacle_in_path()\n self.assertEqual(obstacle_in_path_boolean, False)\n\n def test_obstacle_in_path_detection_true(self):\n \"\"\"\n Test ObstacleMap's ability to determine if obstacles intersect with\n waypoint path. 
This test includes an obstacle that is in the path of\n the UAV.\n \"\"\"\n self.obstacle_map.reset_obstacles()\n self.obstacle_map.reset_waypoints()\n self.obstacle_map.set_drone_position(np.array([0,0,0]))\n obstacle_in_path = StationaryObstacle(np.array([50, 0, 0]), 5, 20)\n waypoint = np.array([100, 0, 0])\n\n self.obstacle_map.add_obstacle(obstacle_in_path)\n self.obstacle_map.add_waypoint(waypoint)\n\n obstacle_in_path_boolean, avoid_paths = self.obstacle_map.is_obstacle_in_path()\n self.assertEqual(obstacle_in_path_boolean, True)\n\n def test_obstacle_under_waypoint_path_false(self):\n \"\"\"\n Test ObstacleMap's ability to go above an obstacle\n \"\"\"\n self.obstacle_map.reset_obstacles()\n self.obstacle_map.reset_waypoints()\n self.obstacle_map.set_drone_position(np.array([0,0,0]))\n obstacle_in_path = StationaryObstacle(np.array([50, 0, 0]), 5, 10)\n waypoint = np.array([100, 0, 50])\n new_uav_position = np.array([0, 0, 50])\n\n self.obstacle_map.add_obstacle(obstacle_in_path)\n self.obstacle_map.add_waypoint(waypoint)\n self.obstacle_map.set_drone_position(new_uav_position)\n\n obstacle_in_path_boolean, avoid_paths = self.obstacle_map.is_obstacle_in_path()\n self.assertEqual(obstacle_in_path_boolean, False)\n\n def test_obstacle_under_waypoint_path_true(self):\n \"\"\"\n Test ObstacleMap's ability to go above an obstacle\n \"\"\"\n self.obstacle_map.reset_obstacles()\n self.obstacle_map.reset_waypoints()\n self.obstacle_map.set_drone_position(np.array([0,0,25]))\n obstacle_in_path = StationaryObstacle(np.array([50, 0, 0]), 5, 15)\n waypoint = np.array([100, 0, 25])\n\n self.obstacle_map.add_obstacle(obstacle_in_path)\n self.obstacle_map.add_waypoint(waypoint)\n\n obstacle_in_path_boolean, avoid_paths = self.obstacle_map.is_obstacle_in_path()\n self.assertEqual(obstacle_in_path_boolean, True)\n\n def test_obstacle_avoid_coords_1(self):\n \"\"\"\n Test ObstacleMap's ability to generate correct avoidance coordinates\n \"\"\"\n self.obstacle_map.reset_obstacles()\n self.obstacle_map.reset_waypoints()\n obstacle_in_path = StationaryObstacle(np.array([-837.24189827,700.1041422,500]), 150, 500)\n waypoint = np.array([-1027.15210095,168.51612707,200.0000034 ])\n new_uav_position = np.array(np.array([-668.95868657,1051.56233827,200.0000064]))\n\n self.obstacle_map.add_obstacle(StationaryObstacle(np.array([0,0,0]), 150, 500))\n self.obstacle_map.add_waypoint(waypoint)\n self.obstacle_map.set_drone_position(new_uav_position)\n obstacle_in_path_boolean, avoid_paths = self.obstacle_map.is_obstacle_in_path()\n self.assertEqual(obstacle_in_path_boolean, False)\n\n def test_flight_boundary_simple(self):\n \"\"\"\n Test the flight boundary system using a simple boundary\n \"\"\"\n min_alt = 100\n max_alt = 750\n flight_boundary_test_object = FlightBoundary(min_alt, max_alt, np.array([[-2000, -2000], [-2000, 2000], [2000, 2000], [2000, -2000]]))\n\n # Inside alt\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([0, 0, ((min_alt + max_alt) / 2)]))\n self.assertTrue(in_bounds_boolean)\n\n # Below minimum altitude\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([0, 0, min_alt - 10]))\n self.assertEqual(in_bounds_boolean, False)\n\n # Above maximum altitude\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([0, 0, max_alt + 10]))\n self.assertEqual(in_bounds_boolean, False)\n\n # Inside alt, outside XY\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([2010, 2010, ((min_alt + max_alt) / 
2)]))\n self.assertEqual(in_bounds_boolean, False)\n\n def test_flight_boundary_complicated(self):\n \"\"\"\n Test the flight boundary system using a complicated boundary\n \"\"\"\n min_alt = 100\n max_alt = 750\n flight_boundary_test_object = FlightBoundary(min_alt, max_alt, np.array([[0, 1000], [100, 100], [1000, 0], [100, -100], [0, -1000], [-100, -100], [-1000, 0], [-100, 100]]))\n\n # Inside bounds\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([50, 50, 200]))\n self.assertTrue(in_bounds_boolean)\n\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([0, 800, 200]))\n self.assertTrue(in_bounds_boolean)\n\n # Outside bounds\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([200, 200, 200]))\n self.assertEqual(in_bounds_boolean, False)\n\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([-200, -200, 200]))\n self.assertEqual(in_bounds_boolean, False)\n\n in_bounds_boolean = flight_boundary_test_object.is_point_in_bounds(np.array([0, 2000, 200]))\n self.assertEqual(in_bounds_boolean, False)\n\n def test_obstacle_avoidance_with_boundaries(self):\n \"\"\"\n Test the obstacle map to determine\n \"\"\"\n flight_boundary_points = np.array([np.array([[-10, -10], [250, -10], [250, 250], [-10, 250]])])\n obstacle_map = ObstacleMap(np.array([0,0,0]), flight_boundary_points)\n test_obstacle = StationaryObstacle(np.array([100, 0, 0]), 50, 500)\n waypoint = np.array([200, 0, 0])\n\n obstacle_map.add_obstacle(test_obstacle)\n obstacle_map.add_waypoint(waypoint)\n\n obstacle_in_path_boolean, avoid_paths = obstacle_map.is_obstacle_in_path()\n self.assertTrue(obstacle_in_path_boolean)\n"
] | [
[
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mrvollger/SDA | [
"eba7d36d0cadf9eba42053b7d2a3371247eebe1b"
] | [
"scripts/coverageByEnds.py"
] | [
"#!/usr/bin/env python\nimport argparse\nimport os\nimport sys\nimport re\nimport numpy as np\nimport intervaltree\nimport pandas as pd\n\nparser = argparse.ArgumentParser(description=\"\")\nparser.add_argument(\"-a\", \"--reads\", nargs=\"+\", help=\"bed file(s) with read start and end locations\" )\nparser.add_argument(\"-b\", \"--regions\", nargs= \"+\", help=\"bed file with regions to count within\")\nparser.add_argument(\"-o\", \"--out\", help=\"bed file with numebr of starts and ends in each region\")\nargs = parser.parse_args()\n\nreadFiles = args.reads\nregionFiles = args.regions\nregions = {}\ndf = None\n\ndef defineRegions():\n\tfor myfile in regionFiles:\n\t\tf = open(myfile).readlines()\n\t\tfor line in f:\n\t\t\tline = line.split()\n\t\t\tChr = line[0]\n\t\t\tstart = int(line[1])\n\t\t\tend = int( line[2] )\n\t\t\tif(Chr not in regions):\n\t\t\t\tregions[Chr] = intervaltree.IntervalTree()\n\t\t\t# first vlaue is number of starts in region, second is numer of ends in region\n\t\t\tregions[Chr][start:end+1] = [0,0]\n\n\ndef increment(point, Chr, startOrEnd):\n\tfor region in regions[Chr][point]:\n\t\tregion.data[startOrEnd] += 1\n\n\ndef addCounts(myfile):\n\tprint(myfile)\n\tf = open(myfile).readlines()\n\tfor line in f:\n\t\tline = line.split()\n\t\tChr = line[0]\n\t\tif(Chr not in regions):\n\t\t\tcontinue\n\t\tstart = int(line[1]) \n\t\tend = int(line[2])\n\t\tincrement(start, Chr, 0)\n\t\tincrement(end, Chr, 1)\n\ndef readReads():\n\tfor myfile in readFiles:\n\t\taddCounts(myfile)\n\n\ndef makeBed():\n\tglobal df\n\tout = \"\"\n\tfor key in sorted(regions):\n\t\ttree = regions[key]\n\t\tfor region in sorted(tree):\n\t\t\tout += \"{}\\t{}\\t{}\\t{}\\t{}\\t{}\\n\".format(key, region.begin, region.end-1, \n\t\t\t\t\tregion.data[0], region.data[1], max(region.data[0], region.data[1]) )\n\topen(args.out, \"w+\").write(out)\n\tdf = pd.read_csv(args.out, header=None, sep=\"\\t\")\n\tdf.columns = [\"chr\", \"start\", \"end\", \"startCount\", \"endCount\", \"maxCount\"]\n\tprint( df[[\"startCount\", \"endCount\", \"maxCount\"]].describe() )\n\ndef main():\n\tdefineRegions()\n\treadReads()\n\tmakeBed()\n\nmain()\n\n\nsds = df.groupby([\"chr\"])[\"maxCount\"].describe()\n\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
giovannidispoto/a-GPUBench | [
"2332fb68247cad347f889c006028385fed4c5c93"
] | [
"apps/tf/slim/nets/alexnet_tf2.py"
] | [
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n# Copyright 2021 Giovanni Dispoto\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Contains a model definition for AlexNet.\n\nThis work was first described in:\n ImageNet Classification with Deep Convolutional Neural Networks\n Alex Krizhevsky, Ilya Sutskever and Geoffrey E. Hinton\n\nand later refined in:\n One weird trick for parallelizing convolutional neural networks\n Alex Krizhevsky, 2014\n\nHere we provide the implementation proposed in \"One weird trick\" and not\n\"ImageNet Classification\", as per the paper, the LRN layers have been removed.\n\nUsage:\n with slim.arg_scope(alexnet.alexnet_v2_arg_scope()):\n outputs, end_points = alexnet.alexnet_v2(inputs)\n\n@@alexnet_v2\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow as tf\n\n\n\ndef alexnet_v2(num_classes=1000,\n is_training=True,\n weight_decay = 0.0005,\n dropout_keep_prob=0.5,\n spatial_squeeze=True,\n scope='alexnet_v2',\n network_depth = None,\n global_pool=False):\n \"\"\"AlexNet version 2.\n\n Described in: http://arxiv.org/pdf/1404.5997v2.pdf\n Parameters from:\n github.com/akrizhevsky/cuda-convnet2/blob/master/layers/\n layers-imagenet-1gpu.cfg\n\n Note: All the fully_connected layers have been transformed to conv2d layers.\n To use in classification mode, resize input to 224x224 or set\n global_pool=True. To use in fully convolutional mode, set\n spatial_squeeze to false.\n The LRN layers have been removed and change the initializers from\n random_normal_initializer to xavier_initializer.\n\n Args:\n inputs: a tensor of size [batch_size, height, width, channels].\n num_classes: the number of predicted classes. If 0 or None, the logits layer\n is omitted and the input features to the logits layerdropout_keep_prob are returned instead.\n is_training: whether or not the model is being trained.\n dropout_keep_prob: the probability that activations are kept in the dropout\n layers during training.\n spatial_squeeze: whether or not should squeeze the spatial dimensions of the\n logits. Useful to remove unnecessary dimensions for classification.\n scope: Optional scope for the variables.\n global_pool: Optional boolean flag. If True, the input to the classification\n layer is avgpooled to size 1x1, for any input size. 
(This is not part\n of the original AlexNet.)\n\n Returns:\n net: the output of the logits layer (if num_classes is a non-zero integer),\n or the non-dropped-out input to the logits layer (if num_classes is 0\n or None).\n end_points: a dict of tensors with intermediate activations.\n \"\"\"\n if weight_decay == None:\n regularizer = None\n else:\n regularizer = tf.keras.regularizers.L2(weight_decay) \n\n model = tf.keras.Sequential()\n \n model.add(tf.keras.Input(shape=[224, 224, 3]))\n model.add(tf.keras.layers.Conv2D(96, (11,11), (4,4), kernel_regularizer=regularizer))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPool2D((2,2), (2,2)))\n model.add(tf.keras.layers.BatchNormalization())\n\n model.add(tf.keras.layers.Conv2D(256, (11, 11), (1,1), kernel_regularizer=regularizer))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPool2D((3,3), (2,2)))\n model.add(tf.keras.layers.BatchNormalization())\n\n model.add(tf.keras.layers.Conv2D(384, (3,3), (1,1), kernel_regularizer=regularizer))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.BatchNormalization())\n\n model.add(tf.keras.layers.Conv2D(384, (3,3), (1,1), kernel_regularizer=regularizer))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.BatchNormalization())\n\n model.add(tf.keras.layers.Conv2D(256, (3,3), (1,1), kernel_regularizer=regularizer))\n model.add(tf.keras.layers.ReLU())\n model.add(tf.keras.layers.MaxPool2D((2,2), (2,2)))\n model.add(tf.keras.layers.BatchNormalization())\n\n model.add(tf.keras.layers.Flatten())\n\n #Fully Connected Part\n model.add(tf.keras.layers.Dense(4096, kernel_regularizer=regularizer))\n model.add(tf.keras.layers.ReLU())\n \n model.add(tf.keras.layers.Dropout(dropout_keep_prob))\n model.add(tf.keras.layers.BatchNormalization())\n\n model.add(tf.keras.layers.Dense(4096, activation=\"relu\", kernel_regularizer=regularizer))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dense(1000, kernel_regularizer=regularizer))\n model.add(tf.keras.layers.BatchNormalization())\n model.add(tf.keras.layers.Dropout(dropout_keep_prob))\n model.add(tf.keras.layers.BatchNormalization())\n # Output Layer\n model.add(tf.keras.layers.Dense(num_classes, activation=\"softmax\", kernel_regularizer=regularizer))\n\n return model \n\nalexnet_v2.default_image_size = 224\n"
] | [
[
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.Input",
"tensorflow.keras.regularizers.L2",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Sequential",
"tensorflow.keras.layers.MaxPool2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Flatten"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
Jerome-maker/ensae_teaching_cs | [
"43ea044361ee60c00c85aea354a7b25c21c0fd07"
] | [
"_todo/programme/seance9_d3js_travail.py"
] | [
"#coding:latin-1\nimport sys\nsys.path.append(\"../../../../program/python/pyensae/src\") # ligne inutile\n\nfrom pyensae import download_data\nimport pandas\n\ndownload_data(\"td9_station_travail.zip\", website = 'xd')\nfile1 = \"td9_station_travail.txt\"\ntbl = pandas.read_csv (file1, sep = \"\\t\")\n\n# voir http://dev.openlayers.org/docs/files/OpenLayers/Marker-js.html pour changer le marker\nhtml = \"\"\"\n<html><body>\n <div id=\"mapdiv\"></div>\n <script src=\"http://www.openlayers.org/api/OpenLayers.js\"></script>\n <script>\n map = new OpenLayers.Map(\"mapdiv\");\n map.addLayer(new OpenLayers.Layer.OSM());\n var proj = new OpenLayers.Projection(\"EPSG:4326\");\n \n var size = new OpenLayers.Size(10,10);\n var offset = new OpenLayers.Pixel(-(size.w/2), -size.h);\n\n var icon_rouge = new OpenLayers.Icon('http://www.xavierdupre.fr/blog/documents/carrerouge.png', size, offset);\n var icon_vert = new OpenLayers.Icon('http://www.xavierdupre.fr/blog/documents/carrevert.png', size, offset);\n \n var zoom=13;\n \n var markers = new OpenLayers.Layer.Markers( \"Markers\" );\n map.addLayer(markers);\n \n __VELIB__\n \n map.setCenter (lonLat0, zoom);\n </script>\n</body></html> \n\"\"\"\n\nposition =\"\"\"\n var lonLat{0} = new OpenLayers.LonLat( {1} ,{2} ).transform(proj, map.getProjectionObject() );\n markers.addMarker(new OpenLayers.Marker(position=lonLat{0},icon=icon_{3}.clone()));\n\"\"\" \n\nlines = [ ]\nfor i,row in enumerate(tbl.values) :\n x = lng = row[2]\n y = lat = row[3]\n c = row[1]\n icon = \"rouge\" if c > 0.5 else \"vert\"\n line = position.format(i,x,y, icon)\n lines.append(line)\n \ntext = \"\\n\".join( lines )\nhtml = html.replace(\"__VELIB__\", text)\nwith open(\"velib_work.html\", \"w\") as f : f.write(html)\n\n#import webbrowser\n#webbrowser.open(\"velib_work.html\")\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
carefree0910/carefree-toolkit | [
"f5afe62a0eb3d05ed08a12632adfb168461413cb"
] | [
"cftool/misc.py"
] | [
"import io\nimport os\nimport sys\nimport dill\nimport json\nimport math\nimport time\nimport errno\nimport random\nimport shutil\nimport decimal\nimport inspect\nimport logging\nimport hashlib\nimport zipfile\nimport datetime\nimport operator\nimport threading\nimport unicodedata\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom typing import *\nfrom abc import abstractmethod\nfrom PIL import Image\nfrom functools import reduce\nfrom functools import partial\nfrom itertools import product\nfrom collections import Counter\n\nfrom numpy.lib.stride_tricks import as_strided\n\n\ndill._dill._reverse_typemap[\"ClassType\"] = type\n\n\n# util functions\n\n\ndef timestamp(simplify: bool = False, ensure_different: bool = False) -> str:\n \"\"\"\n Return current timestamp.\n\n Parameters\n ----------\n simplify : bool. If True, format will be simplified to 'year-month-day'.\n ensure_different : bool. If True, format will include millisecond.\n\n Returns\n -------\n timestamp : str\n\n \"\"\"\n\n now = datetime.datetime.now()\n if simplify:\n return now.strftime(\"%Y-%m-%d\")\n if ensure_different:\n return now.strftime(\"%Y-%m-%d_%H-%M-%S-%f\")\n return now.strftime(\"%Y-%m-%d_%H-%M-%S\")\n\n\ndef prod(iterable: Iterable) -> float:\n \"\"\" Return cumulative production of an iterable. \"\"\"\n\n return float(reduce(operator.mul, iterable, 1))\n\n\ndef hash_code(code: str) -> str:\n \"\"\" Return hash code for a string. \"\"\"\n\n code = code.encode()\n return hashlib.md5(code).hexdigest()[:8]\n\n\ndef prefix_dict(d: Dict[str, Any], prefix: str):\n \"\"\" Prefix every key in dict `d` with `prefix`. \"\"\"\n\n return {f\"{prefix}_{k}\": v for k, v in d.items()}\n\n\ndef shallow_copy_dict(d: dict) -> dict:\n d = d.copy()\n for k, v in d.items():\n if isinstance(v, dict):\n d[k] = shallow_copy_dict(v)\n return d\n\n\ndef update_dict(src_dict: dict, tgt_dict: dict) -> dict:\n \"\"\"\n Update tgt_dict with src_dict.\n * Notice that changes will happen only on keys which src_dict holds.\n\n Parameters\n ----------\n src_dict : dict\n tgt_dict : dict\n\n Returns\n -------\n tgt_dict : dict\n\n \"\"\"\n\n for k, v in src_dict.items():\n tgt_v = tgt_dict.get(k)\n if tgt_v is None:\n tgt_dict[k] = v\n elif not isinstance(v, dict):\n tgt_dict[k] = v\n else:\n update_dict(v, tgt_v)\n return tgt_dict\n\n\ndef fix_float_to_length(num: float, length: int) -> str:\n \"\"\" Change a float number to string format with fixed length. \"\"\"\n\n ctx = decimal.Context()\n ctx.prec = 2 * length\n d = ctx.create_decimal(repr(num))\n str_num = format(d, \"f\").lower()\n if str_num == \"nan\":\n return f\"{str_num:^{length}s}\"\n idx = str_num.find(\".\")\n if idx == -1:\n diff = length - len(str_num)\n if diff <= 0:\n return str_num\n if diff == 1:\n return f\"{str_num}.\"\n return f\"{str_num}.{'0' * (diff - 1)}\"\n length = max(length, idx)\n return str_num[:length].ljust(length, \"0\")\n\n\ndef truncate_string_to_length(string: str, length: int) -> str:\n \"\"\" Truncate a string to make sure its length not exceeding a given length. \"\"\"\n\n if len(string) <= length:\n return string\n half_length = int(0.5 * length) - 1\n head = string[:half_length]\n tail = string[-half_length:]\n return f\"{head}{'.' * (length - 2 * half_length)}{tail}\"\n\n\ndef grouped(iterable: Iterable, n: int, *, keep_tail: bool = False) -> List[tuple]:\n \"\"\" Group an iterable every `n` elements. 
\"\"\"\n\n if not keep_tail:\n return list(zip(*[iter(iterable)] * n))\n with batch_manager(iterable, batch_size=n, max_batch_size=n) as manager:\n return [tuple(batch) for batch in manager]\n\n\ndef grouped_into(iterable: Iterable, n: int) -> List[tuple]:\n \"\"\" Group an iterable into `n` groups. \"\"\"\n\n elements = list(iterable)\n num_elements = len(elements)\n num_elem_per_group = int(math.ceil(num_elements / n))\n results: List[tuple] = []\n split_idx = num_elements + n - n * num_elem_per_group\n start = 0\n for i in range(split_idx):\n end = start + num_elem_per_group\n results.append(tuple(elements[start:end]))\n start = end\n for i in range(split_idx, n):\n end = start + num_elem_per_group - 1\n results.append(tuple(elements[start:end]))\n start = end\n return results\n\n\ndef is_numeric(s: Any) -> bool:\n \"\"\" Check whether `s` is a number. \"\"\"\n\n try:\n s = float(s)\n return True\n except (TypeError, ValueError):\n try:\n unicodedata.numeric(s)\n return True\n except (TypeError, ValueError):\n return False\n\n\ndef get_one_hot(feature: Union[list, np.ndarray], dim: int) -> np.ndarray:\n \"\"\"\n Get one-hot representation.\n\n Parameters\n ----------\n feature : array-like, source data of one-hot representation.\n dim : int, dimension of the one-hot representation.\n\n Returns\n -------\n one_hot : np.ndarray, one-hot representation of `feature`\n\n \"\"\"\n\n one_hot = np.zeros([len(feature), dim], np.int64)\n one_hot[range(len(one_hot)), np.asarray(feature, np.int64).ravel()] = 1\n return one_hot\n\n\ndef show_or_save(\n export_path: str,\n fig: Optional[plt.figure] = None,\n **kwargs: Any,\n) -> None:\n \"\"\"\n Utility function to deal with figure.\n\n Parameters\n ----------\n export_path : {None, str}\n * If None, the figure will be shown.\n * If str, it represents the path where the figure should be saved to.\n fig : {None, plt.Figure}\n * If None, default figure contained in plt will be executed.\n * If plt.figure, it will be executed\n\n \"\"\"\n\n if export_path is None:\n fig.show(**kwargs) if fig is not None else plt.show(**kwargs)\n else:\n if fig is not None:\n fig.savefig(export_path)\n else:\n plt.savefig(export_path, **kwargs)\n plt.close()\n\n\ndef show_or_return(return_canvas: bool) -> Union[None, np.ndarray]:\n \"\"\"\n Utility function to deal with current plt.\n\n Parameters\n ----------\n return_canvas : bool, whether return canvas or not.\n\n \"\"\"\n\n if not return_canvas:\n plt.show()\n return\n\n buffer_ = io.BytesIO()\n plt.savefig(buffer_, format=\"png\")\n plt.close()\n buffer_.seek(0)\n image = Image.open(buffer_)\n canvas = np.asarray(image)[..., :3]\n buffer_.close()\n return canvas\n\n\ndef get_indices_from_another(base: np.ndarray, segment: np.ndarray) -> np.ndarray:\n \"\"\"\n Get `segment` elements' indices in `base`.\n\n Warnings\n ----------\n All elements in segment should appear in base to ensure validity.\n\n Parameters\n ----------\n base : np.ndarray, base array.\n segment : np.ndarray, segment array.\n\n Returns\n -------\n indices : np.ndarray, positions where elements in `segment` appear in `base`\n\n Examples\n -------\n >>> import numpy as np\n >>> base, segment = np.arange(100), np.random.permutation(100)[:10]\n >>> assert np.allclose(get_indices_from_another(base, segment), segment)\n\n \"\"\"\n base_sorted_args = np.argsort(base)\n positions = np.searchsorted(base[base_sorted_args], segment)\n return base_sorted_args[positions]\n\n\nclass UniqueIndices(NamedTuple):\n \"\"\"\n unique : np.ndarray, unique values of the 
given array (`arr`).\n unique_cnt : np.ndarray, counts of each unique value.\n sorting_indices : np.ndarray, indices which can (stably) sort the given\n array by its value.\n split_arr : np.ndarray, array which can split the `sorting_indices`\n to make sure that. Each portion of the split\n indices belong & only belong to one of the\n unique values.\n \"\"\"\n\n unique: np.ndarray\n unique_cnt: np.ndarray\n sorting_indices: np.ndarray\n split_arr: np.ndarray\n\n @property\n def split_indices(self):\n return np.split(self.sorting_indices, self.split_arr)\n\n\ndef get_unique_indices(arr: np.ndarray) -> UniqueIndices:\n \"\"\"\n Get indices for unique values of an array.\n\n Parameters\n ----------\n arr : np.ndarray, target array which we wish to find indices of each unique value.\n\n Returns\n -------\n UniqueIndices\n\n Examples\n -------\n >>> import numpy as np\n >>> arr = np.array([1, 2, 3, 2, 4, 1, 0, 1], np.int64)\n >>> # UniqueIndices(\n >>> # unique = array([0, 1, 2, 3, 4], dtype=int64),\n >>> # unique_cnt = array([1, 3, 2, 1, 1], dtype=int64),\n >>> # sorting_indices = array([6, 0, 5, 7, 1, 3, 2, 4], dtype=int64),\n >>> # split_arr = array([1, 4, 6, 7], dtype=int64))\n >>> # split_indices = [array([6], dtype=int64), array([0, 5, 7], dtype=int64),\n >>> # array([1, 3], dtype=int64), array([2], dtype=int64),\n >>> # array([4], dtype=int64)]\n >>> print(get_unique_indices(arr))\n\n \"\"\"\n unique, unique_inv, unique_cnt = np.unique(\n arr,\n return_inverse=True,\n return_counts=True,\n )\n sorting_indices, split_arr = (\n np.argsort(unique_inv, kind=\"mergesort\"),\n np.cumsum(unique_cnt)[:-1],\n )\n return UniqueIndices(unique, unique_cnt, sorting_indices, split_arr)\n\n\ndef get_counter_from_arr(arr: np.ndarray) -> Counter:\n \"\"\"\n Get `Counter` of an array.\n\n Parameters\n ----------\n arr : np.ndarray, target array which we wish to get `Counter` from.\n\n Returns\n -------\n Counter\n\n Examples\n -------\n >>> import numpy as np\n >>> arr = np.array([1, 2, 3, 2, 4, 1, 0, 1], np.int64)\n >>> # Counter({1: 3, 2: 2, 0: 1, 3: 1, 4: 1})\n >>> print(get_counter_from_arr(arr))\n\n \"\"\"\n if isinstance(arr, np.ndarray):\n arr = dict(zip(*np.unique(arr, return_counts=True)))\n return Counter(arr)\n\n\ndef allclose(*arrays: np.ndarray, **kwargs) -> bool:\n \"\"\"\n Perform `np.allclose` to `arrays` one by one.\n\n Parameters\n ----------\n arrays : np.ndarray, target arrays.\n **kwargs : keyword arguments which will be passed into `np.allclose`.\n\n Returns\n -------\n allclose : bool\n\n \"\"\"\n for i, arr in enumerate(arrays[:-1]):\n if not np.allclose(arr, arrays[i + 1], **kwargs):\n return False\n return True\n\n\ndef register_core(\n name: str,\n global_dict: Dict[str, type],\n *,\n before_register: Optional[Callable] = None,\n after_register: Optional[Callable] = None,\n):\n def _register(cls):\n if before_register is not None:\n before_register(cls)\n registered = global_dict.get(name)\n if registered is not None:\n print(\n f\"{LoggingMixin.warning_prefix}'{name}' has already registered \"\n f\"in the given global dict ({global_dict})\"\n )\n return cls\n global_dict[name] = cls\n if after_register is not None:\n after_register(cls)\n return cls\n\n return _register\n\n\ndef check(constraints: Dict[str, Union[str, List[str]]], *, raise_error: bool = True):\n def wrapper(fn):\n def _check_core(k, v):\n new_v = v\n constraint_list = constraints.get(k)\n if constraint_list is not None:\n if isinstance(constraint_list, str):\n constraint_list = [constraint_list]\n if 
constraint_list[0] == \"choices\":\n choices = constraint_list[1]\n if v not in choices:\n raise ValueError(\n f\"given value ({v}) is not included in \"\n f\"given choices ({choices})\"\n )\n else:\n for constraint in constraint_list:\n check_rs = getattr(SanityChecker, constraint)(v)\n if not check_rs[\"suc\"]:\n raise ValueError(check_rs[\"info\"])\n new_v = check_rs[\"n\"]\n if v != new_v:\n if raise_error:\n raise ValueError(\n f\"'{k}' ({v}, {type(v)}) does not satisfy \"\n f\"Constraints({constraint_list})\"\n )\n msg = f\"{LoggingMixin.warning_prefix}'{k}' is cast from {v} -> {new_v}\"\n print(msg)\n return new_v\n\n def inner(*args, **kwargs):\n signature_keys = list(inspect.signature(fn).parameters.keys())\n new_args = []\n for arg, signature_key in zip(args, signature_keys[: len(args)]):\n new_args.append(_check_core(signature_key, arg))\n new_kwargs = {}\n for k, v in kwargs.items():\n new_kwargs[k] = _check_core(k, v)\n return fn(*new_args, **new_kwargs)\n\n return inner\n\n return wrapper\n\n\n# util modules\n\n\nclass StrideArray:\n def __init__(\n self,\n arr: np.ndarray,\n *,\n copy: bool = False,\n writable: Optional[bool] = None,\n ):\n self.arr = arr\n self.shape = arr.shape\n self.num_dim = len(self.shape)\n self.strides = arr.strides\n self.copy = copy\n if writable is None:\n writable = copy\n self.writable = writable\n\n def __str__(self) -> str:\n return self.arr.__str__()\n\n def __repr__(self) -> str:\n return self.arr.__repr__()\n\n def _construct(\n self,\n shapes: Tuple[int, ...],\n strides: Tuple[int, ...],\n ) -> np.ndarray:\n arr = self.arr.copy() if self.copy else self.arr\n return as_strided(\n arr,\n shape=shapes,\n strides=strides,\n writeable=self.writable,\n )\n\n @staticmethod\n def _get_output_dim(in_dim: int, window: int, stride: int) -> int:\n return (in_dim - window) // stride + 1\n\n def roll(self, window: int, *, stride: int = 1, axis: int = -1) -> np.ndarray:\n while axis < 0:\n axis += self.num_dim\n target_dim = self.shape[axis]\n rolled_dim = self._get_output_dim(target_dim, window, stride)\n if rolled_dim <= 0:\n msg = f\"window ({window}) is too large for target dimension ({target_dim})\"\n raise ValueError(msg)\n # shapes\n rolled_shapes = tuple(self.shape[:axis]) + (rolled_dim, window)\n if axis < self.num_dim - 1:\n rolled_shapes = rolled_shapes + self.shape[axis + 1 :]\n # strides\n previous_strides = tuple(self.strides[:axis])\n target_stride = (self.strides[axis] * stride,)\n latter_strides = tuple(self.strides[axis:])\n rolled_strides = previous_strides + target_stride + latter_strides\n # construct\n return self._construct(rolled_shapes, rolled_strides)\n\n def patch(\n self,\n patch_w: int,\n patch_h: Optional[int] = None,\n *,\n h_stride: int = 1,\n w_stride: int = 1,\n h_axis: int = -2,\n ) -> np.ndarray:\n if self.num_dim < 2:\n raise ValueError(\"`patch` requires input with at least 2d\")\n while h_axis < 0:\n h_axis += self.num_dim\n w_axis = h_axis + 1\n if patch_h is None:\n patch_h = patch_w\n h_shape, w_shape = self.shape[h_axis], self.shape[w_axis]\n if h_shape < patch_h:\n msg = f\"patch_h ({patch_h}) is too large for target dimension ({h_shape})\"\n raise ValueError(msg)\n if w_shape < patch_w:\n msg = f\"patch_w ({patch_w}) is too large for target dimension ({w_shape})\"\n raise ValueError(msg)\n # shapes\n patched_h_dim = self._get_output_dim(h_shape, patch_h, h_stride)\n patched_w_dim = self._get_output_dim(w_shape, patch_w, w_stride)\n patched_dim = (patched_h_dim, patched_w_dim)\n patched_dim = patched_dim + 
(patch_h, patch_w)\n patched_shapes = tuple(self.shape[:h_axis]) + patched_dim\n if w_axis < self.num_dim - 1:\n patched_shapes = patched_shapes + self.shape[w_axis + 1 :]\n # strides\n arr_h_stride, arr_w_stride = self.strides[h_axis], self.strides[w_axis]\n previous_strides = tuple(self.strides[:h_axis])\n target_stride = (arr_h_stride * h_stride, arr_w_stride * w_stride)\n target_stride = target_stride + (arr_h_stride, arr_w_stride)\n latter_strides = tuple(self.strides[w_axis + 1 :])\n patched_strides = previous_strides + target_stride + latter_strides\n # construct\n return self._construct(patched_shapes, patched_strides)\n\n def repeat(self, k: int, axis: int = -1) -> np.ndarray:\n while axis < 0:\n axis += self.num_dim\n target_dim = self.shape[axis]\n if target_dim != 1:\n raise ValueError(\"`repeat` can only be applied on axis with dim == 1\")\n # shapes\n repeated_shapes = tuple(self.shape[:axis]) + (k,)\n if axis < self.num_dim - 1:\n repeated_shapes = repeated_shapes + self.shape[axis + 1 :]\n # strides\n previous_strides = tuple(self.strides[:axis])\n target_stride = (0,)\n latter_strides = tuple(self.strides[axis + 1 :])\n repeated_strides = previous_strides + target_stride + latter_strides\n # construct\n return self._construct(repeated_shapes, repeated_strides)\n\n\nclass SanityChecker:\n @staticmethod\n def int(n):\n rs = {\"suc\": True}\n try:\n rs[\"n\"] = int(n)\n return rs\n except Exception as e:\n rs[\"suc\"], rs[\"info\"] = False, e\n return rs\n\n @staticmethod\n def odd(n):\n rs = {\"suc\": True}\n try:\n n = rs[\"n\"] = int(n)\n if n % 2 == 1:\n return rs\n rs[\"suc\"], rs[\"info\"] = False, \"input is not an odd number\"\n return rs\n except Exception as e:\n rs[\"suc\"], rs[\"info\"] = False, e\n return rs\n\n @staticmethod\n def float(n):\n rs = {\"suc\": True}\n try:\n rs[\"n\"] = float(n)\n return rs\n except Exception as e:\n rs[\"suc\"], rs[\"info\"] = False, e\n return rs\n\n\nclass Incrementer:\n \"\"\"\n Util class which can calculate running mean & running std efficiently.\n\n Parameters\n ----------\n window_size : {int, None}, window size of running statistics.\n * If None, then all history records will be used for calculation.\n\n Examples\n ----------\n >>> incrementer = Incrementer(window_size=5)\n >>> for i in range(10):\n >>> incrementer.update(i)\n >>> if i >= 4:\n >>> print(incrementer.mean) # will print 2.0, 3.0, ..., 6.0, 7.0\n\n \"\"\"\n\n def __init__(self, window_size: int = None):\n if window_size is not None:\n if not isinstance(window_size, int):\n msg = f\"window size should be integer, {type(window_size)} found\"\n raise ValueError(msg)\n if window_size < 2:\n msg = f\"window size should be greater than 2, {window_size} found\"\n raise ValueError(msg)\n self._window_size = window_size\n self._n_record = self._previous = None\n self._running_sum = self._running_square_sum = None\n\n @property\n def mean(self):\n return self._running_sum / self._n_record\n\n @property\n def std(self):\n return math.sqrt(\n max(\n 0.0,\n self._running_square_sum / self._n_record - self.mean ** 2,\n )\n )\n\n @property\n def n_record(self):\n return self._n_record\n\n def update(self, new_value):\n if self._n_record is None:\n self._n_record = 1\n self._running_sum = new_value\n self._running_square_sum = new_value ** 2\n else:\n self._n_record += 1\n self._running_sum += new_value\n self._running_square_sum += new_value ** 2\n if self._window_size is not None:\n if self._previous is None:\n self._previous = [new_value]\n else:\n 
self._previous.append(new_value)\n if self._n_record == self._window_size + 1:\n self._n_record -= 1\n previous = self._previous.pop(0)\n self._running_sum -= previous\n self._running_square_sum -= previous ** 2\n\n\nclass _Formatter(logging.Formatter):\n \"\"\" Formatter for logging, which supports millisecond. \"\"\"\n\n converter = datetime.datetime.fromtimestamp\n\n def formatTime(self, record, datefmt=None):\n ct = self.converter(record.created)\n if datefmt:\n s = ct.strftime(datefmt)\n else:\n t = ct.strftime(\"%Y-%m-%d %H:%M:%S\")\n s = \"%s.%03d\" % (t, record.msecs)\n return s\n\n def formatMessage(self, record: logging.LogRecord) -> str:\n record.__dict__.setdefault(\"func_prefix\", \"Unknown\")\n return super().formatMessage(record)\n\n\nclass LoggingMixin:\n \"\"\"\n Mixin class to provide logging methods for base class.\n\n Attributes\n ----------\n _triggered_ : bool\n * If not `_triggered_`, log file will not be created.\n\n _verbose_level_ : int\n * Preset verbose level of the whole logging process.\n\n Methods\n ----------\n log_msg(self, body, prefix=\"\", verbose_level=1)\n Log something either through console or to a file.\n * body : str\n Main logging message.\n * prefix : str\n Prefix added to `body` when logging message goes through console.\n * verbose_level : int\n If `self._verbose_level_` >= verbose_level, then logging message\n will go through console.\n\n log_block_msg(self, body, prefix=\"\", title=\"\", verbose_level=1)\n Almost the same as `log_msg`, except adding `title` on top of `body`.\n\n \"\"\"\n\n _triggered_ = False\n _initialized_ = False\n _logging_path_ = None\n _logger_ = _verbose_level_ = None\n _date_format_string_ = \"%Y-%m-%d %H:%M:%S.%f\"\n _formatter_ = _Formatter(\n \"[ {asctime:s} ] [ {levelname:^8s} ] {func_prefix:s} {message:s}\",\n _date_format_string_,\n style=\"{\",\n )\n _timing_dict_, _time_cache_dict_ = {}, {}\n\n info_prefix = \"> [ info ] \"\n warning_prefix = \"> [warning] \"\n error_prefix = \"> [ error ] \"\n\n @property\n def logging_path(self):\n if self._logging_path_ is None:\n folder = os.path.join(os.getcwd(), \"_logging\", type(self).__name__)\n os.makedirs(folder, exist_ok=True)\n self._logging_path_ = self.generate_logging_path(folder)\n return self._logging_path_\n\n @property\n def console_handler(self):\n if self._logger_ is None:\n return\n for handler in self._logger_.handlers:\n if isinstance(handler, logging.StreamHandler):\n return handler\n\n @staticmethod\n def _get_func_prefix(frame=None, return_prefix=True):\n if frame is None:\n frame = inspect.currentframe().f_back.f_back\n if not return_prefix:\n return frame\n frame_info = inspect.getframeinfo(frame)\n file_name = truncate_string_to_length(os.path.basename(frame_info.filename), 16)\n func_name = truncate_string_to_length(frame_info.function, 24)\n func_prefix = (\n f\"[ {func_name:^24s} ] [ {file_name:>16s}:{frame_info.lineno:<4d} ]\"\n )\n return func_prefix\n\n @staticmethod\n def _release_handlers(logger):\n for handler in logger.handlers[:]:\n handler.close()\n logger.removeHandler(handler)\n\n @staticmethod\n def generate_logging_path(folder: str) -> str:\n return os.path.join(folder, f\"{timestamp()}.log\")\n\n def _init_logging(self, verbose_level: Optional[int] = 2, trigger: bool = True):\n wants_trigger = trigger and not LoggingMixin._triggered_\n if LoggingMixin._initialized_ and not wants_trigger:\n return self\n LoggingMixin._initialized_ = True\n logger_name = getattr(self, \"_logger_name_\", \"root\")\n logger = LoggingMixin._logger_ = 
logging.getLogger(logger_name)\n LoggingMixin._verbose_level_ = verbose_level\n if not trigger:\n return self\n LoggingMixin._triggered_ = True\n config = getattr(self, \"config\", {})\n self._logging_path_ = config.get(\"_logging_path_\")\n if self._logging_path_ is None:\n self._logging_path_ = config[\"_logging_path_\"] = self.logging_path\n os.makedirs(os.path.dirname(self.logging_path), exist_ok=True)\n file_handler = logging.FileHandler(self.logging_path, encoding=\"utf-8\")\n file_handler.setFormatter(self._formatter_)\n file_handler.setLevel(logging.DEBUG)\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n console.setFormatter(_Formatter(\"{custom_prefix:s}{message:s}\", style=\"{\"))\n logger.setLevel(logging.DEBUG)\n self._release_handlers(logger)\n logger.addHandler(console)\n logger.addHandler(file_handler)\n self.log_block_msg(sys.version, title=\"system version\", verbose_level=None)\n return self\n\n def log_msg(\n self,\n body: str,\n prefix: str = \"\",\n verbose_level: Optional[int] = 1,\n msg_level: int = logging.INFO,\n frame=None,\n ):\n preset_verbose_level = getattr(self, \"_verbose_level\", None)\n if preset_verbose_level is not None:\n self._verbose_level_ = preset_verbose_level\n elif self._verbose_level_ is None:\n self._verbose_level_ = 0\n console_handler = self.console_handler\n if verbose_level is None or self._verbose_level_ < verbose_level:\n do_print, console_level = False, msg_level + 10\n else:\n do_print, console_level = not LoggingMixin._triggered_, msg_level\n if console_handler is not None:\n console_handler.setLevel(console_level)\n if do_print:\n print(prefix + body)\n elif LoggingMixin._triggered_:\n func_prefix = self._get_func_prefix(frame)\n self._logger_.log(\n msg_level,\n body,\n extra={\"func_prefix\": func_prefix, \"custom_prefix\": prefix},\n )\n if console_handler is not None:\n console_handler.setLevel(logging.INFO)\n\n def log_block_msg(\n self,\n body: str,\n prefix: str = \"\",\n title: str = \"\",\n verbose_level: Optional[int] = 1,\n msg_level: int = logging.INFO,\n frame=None,\n ):\n frame = self._get_func_prefix(frame, False)\n self.log_msg(f\"{title}\\n{body}\\n\", prefix, verbose_level, msg_level, frame)\n\n def exception(self, body, frame=None):\n self._logger_.exception(\n body,\n extra={\n \"custom_prefix\": self.error_prefix,\n \"func_prefix\": LoggingMixin._get_func_prefix(frame),\n },\n )\n\n @staticmethod\n def log_with_external_method(body, prefix, log_method, *args, **kwargs):\n if log_method is None:\n print(prefix + body)\n else:\n kwargs[\"frame\"] = LoggingMixin._get_func_prefix(\n kwargs.pop(\"frame\", None),\n False,\n )\n log_method(body, prefix, *args, **kwargs)\n\n @staticmethod\n def merge_logs_by_time(*log_files, tgt_file):\n tgt_folder = os.path.dirname(tgt_file)\n date_str_len = (\n len(datetime.datetime.today().strftime(LoggingMixin._date_format_string_))\n + 4\n )\n with lock_manager(tgt_folder, [tgt_file], clear_stuffs_after_exc=False):\n msg_dict, msg_block, last_searched = {}, [], None\n for log_file in log_files:\n with open(log_file, \"r\") as f:\n for line in f:\n date_str = line[:date_str_len]\n if date_str[:2] == \"[ \" and date_str[-2:] == \" ]\":\n searched_time = datetime.datetime.strptime(\n date_str[2:-2],\n LoggingMixin._date_format_string_,\n )\n else:\n msg_block.append(line)\n continue\n if last_searched is not None:\n msg_block_ = \"\".join(msg_block)\n msg_dict.setdefault(last_searched, []).append(msg_block_)\n last_searched = searched_time\n msg_block = [line]\n if 
msg_block:\n msg_dict.setdefault(last_searched, []).append(\n \"\".join(msg_block)\n )\n with open(tgt_file, \"w\") as f:\n f.write(\"\".join([\"\".join(msg_dict[key]) for key in sorted(msg_dict)]))\n\n @classmethod\n def reset_logging(cls) -> None:\n cls._triggered_ = False\n cls._initialized_ = False\n cls._logging_path_ = None\n if cls._logger_ is not None:\n cls._release_handlers(cls._logger_)\n cls._logger_ = cls._verbose_level_ = None\n cls._timing_dict_, cls._time_cache_dict_ = {}, {}\n\n @classmethod\n def start_timer(cls, name):\n if name in cls._time_cache_dict_:\n print(\n f\"{cls.warning_prefix}'{name}' was already in time cache dict, \"\n \"this may cause by calling `start_timer` repeatedly\"\n )\n return\n cls._time_cache_dict_[name] = time.time()\n\n @classmethod\n def end_timer(cls, name):\n start_time = cls._time_cache_dict_.pop(name, None)\n if start_time is None:\n print(\n f\"{cls.warning_prefix}'{name}' was not found in time cache dict, \"\n \"this may cause by not calling `start_timer` method\"\n )\n return\n incrementer = cls._timing_dict_.setdefault(name, Incrementer())\n incrementer.update(time.time() - start_time)\n\n def log_timing(self):\n timing_str_list = [\"=\" * 138]\n for name in sorted(self._timing_dict_.keys()):\n incrementer = self._timing_dict_[name]\n timing_str_list.append(\n f\"| {name:<82s} | \"\n f\"{fix_float_to_length(incrementer.mean, 10)} ± \"\n f\"{fix_float_to_length(incrementer.std, 10)} | \"\n f\"{incrementer.n_record:>12d} hits |\"\n )\n timing_str_list.append(\"-\" * 138)\n self.log_block_msg(\n \"\\n\".join(timing_str_list),\n title=\"timing\",\n verbose_level=None,\n msg_level=logging.DEBUG,\n )\n return self\n\n\nclass PureLoggingMixin:\n \"\"\"\n Mixin class to provide (pure) logging method for base class.\n\n Attributes\n ----------\n _loggers_ : dict(int, logging.Logger)\n Recorded all loggers initialized.\n\n _formatter_ : _Formatter\n Formatter for all loggers.\n\n Methods\n ----------\n log_msg(self, name, msg, msg_level=logging.INFO)\n Log something to a file, with logger initialized by `name`.\n\n log_block_msg(self, name, title, body, msg_level=logging.INFO)\n Almost the same as `log_msg`, except adding `title` on top of `body`.\n\n \"\"\"\n\n _name = _meta_name = None\n\n _formatter_ = LoggingMixin._formatter_\n _loggers_: Dict[str, logging.Logger] = {}\n _logger_paths_: Dict[str, str] = {}\n _timing_dict_ = {}\n\n @property\n def meta_suffix(self):\n return \"\" if self._meta_name is None else self._meta_name\n\n @property\n def name_suffix(self):\n return \"\" if self._name is None else f\"-{self._name}\"\n\n @property\n def meta_log_name(self):\n return f\"__meta__{self.meta_suffix}{self.name_suffix}\"\n\n @staticmethod\n def get_logging_path(logger):\n logging_path = None\n for handler in logger.handlers:\n if isinstance(handler, logging.FileHandler):\n logging_path = handler.baseFilename\n break\n if logging_path is None:\n raise ValueError(f\"No FileHandler was found in given logger '{logger}'\")\n return logging_path\n\n def _get_logger_info(self, name):\n logger = name if isinstance(name, logging.Logger) else self._loggers_.get(name)\n if logger is None:\n raise ValueError(\n f\"logger for '{name}' is not defined, \"\n \"please call `_setup_logger` first\"\n )\n if isinstance(name, str):\n logging_path = self._logger_paths_[name]\n else:\n logging_path = self.get_logging_path(logger)\n return logger, os.path.dirname(logging_path), logging_path\n\n def _setup_logger(self, name, logging_path, level=logging.DEBUG):\n if 
name in self._loggers_:\n return\n console = logging.StreamHandler()\n console.setLevel(logging.CRITICAL)\n console.setFormatter(self._formatter_)\n file_handler = logging.FileHandler(logging_path)\n file_handler.setFormatter(self._formatter_)\n file_handler.setLevel(level)\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n LoggingMixin._release_handlers(logger)\n logger.addHandler(console)\n logger.addHandler(file_handler)\n PureLoggingMixin._loggers_[name] = logger\n PureLoggingMixin._logger_paths_[name] = logging_path\n for handler in logging.getLogger().handlers:\n handler.setLevel(logging.CRITICAL)\n self.log_block_msg(name, \"system version\", sys.version)\n\n def _log_meta_msg(self, msg, msg_level=logging.INFO, frame=None):\n if frame is None:\n frame = inspect.currentframe().f_back\n self.log_msg(self.meta_log_name, msg, msg_level, frame)\n\n def _log_with_meta(self, task_name, msg, msg_level=logging.INFO, frame=None):\n if frame is None:\n frame = inspect.currentframe().f_back\n self._log_meta_msg(f\"{task_name} {msg}\", msg_level, frame)\n self.log_msg(task_name, f\"current task {msg}\", msg_level, frame)\n\n def log_msg(self, name, msg, msg_level=logging.INFO, frame=None):\n logger, logging_folder, logging_path = self._get_logger_info(name)\n with lock_manager(\n logging_folder,\n [logging_path],\n clear_stuffs_after_exc=False,\n ):\n logger.log(\n msg_level,\n msg,\n extra={\n \"custom_prefix\": \"\",\n \"func_prefix\": LoggingMixin._get_func_prefix(frame),\n },\n )\n return logger\n\n def log_block_msg(self, name, title, body, msg_level=logging.INFO, frame=None):\n frame = LoggingMixin._get_func_prefix(frame, False)\n self.log_msg(name, f\"{title}\\n{body}\\n\", msg_level, frame)\n\n def exception(self, name, msg, frame=None):\n logger, logging_folder, logging_path = self._get_logger_info(name)\n with lock_manager(\n logging_folder,\n [logging_path],\n clear_stuffs_after_exc=False,\n ):\n logger.exception(\n msg,\n extra={\n \"custom_prefix\": LoggingMixin.error_prefix,\n \"func_prefix\": LoggingMixin._get_func_prefix(frame),\n },\n )\n\n def del_logger(self, name):\n logger = self.log_msg(name, f\"clearing up logger information of '{name}'\")\n del self._loggers_[name], self._logger_paths_[name]\n LoggingMixin._release_handlers(logger)\n del logger\n\n\nclass SavingMixin(LoggingMixin):\n \"\"\"\n Mixin class to provide logging & saving method for base class.\n\n Warnings\n ----------\n We require base class to define every property appeared in __init__ (args, kwargs)\n if we want to utilize `SavingMixin.load_with`\n\n ```python\n class Foo:\n def __init__(self, a, *, b):\n # these are required\n self.a = a\n self.b = b\n # others could be customized\n ...\n ```\n\n Methods\n ----------\n save(self, folder)\n Save `self` to folder.\n\n def load(self, folder)\n Load from folder.\n\n \"\"\"\n\n @property\n @abstractmethod\n def data_tuple_base(self) -> Optional[Type[NamedTuple]]:\n pass\n\n @property\n @abstractmethod\n def data_tuple_attributes(self) -> Optional[List[str]]:\n pass\n\n @property\n def cache_excludes(self):\n return set()\n\n @property\n def lock_verbose(self):\n verbose_level = getattr(self, \"_verbose_level\", None)\n if verbose_level is None:\n return False\n return verbose_level >= 5\n\n def _data_tuple_context(self, *, is_saving: bool):\n return data_tuple_saving_controller(self, is_saving=is_saving)\n\n def save(self, folder: str, *, compress: bool = True):\n with self._data_tuple_context(is_saving=True):\n Saving.save_instance(self, 
folder, self.log_msg)\n if compress:\n abs_folder = os.path.abspath(folder)\n base_folder = os.path.dirname(abs_folder)\n with lock_manager(base_folder, [folder]):\n Saving.compress(abs_folder, remove_original=True)\n return self\n\n def load(self, folder: str, *, compress: bool = True):\n base_folder = os.path.dirname(os.path.abspath(folder))\n with lock_manager(base_folder, [folder]):\n with Saving.compress_loader(\n folder,\n compress,\n remove_extracted=True,\n logging_mixin=self,\n ):\n with self._data_tuple_context(is_saving=False):\n Saving.load_instance(self, folder, log_method=self.log_msg)\n return self\n\n @staticmethod\n def load_with(cls: Type, folder: str, *, compress: bool = True) -> Any:\n # load every thing into a dummy instance\n dummy = type(cls.__name__, (SavingMixin,), {})()\n dummy.load(folder, compress=compress)\n # inject everything into the target instance\n spec = inspect.getfullargspec(cls.__init__)\n args = spec.args[1:]\n kwargs_keys = spec.kwonlyargs\n arg_values = tuple(getattr(dummy, arg) for arg in args)\n kwargs = {key: getattr(dummy, key) for key in kwargs_keys}\n instance = cls(*arg_values, **kwargs)\n update_dict(dummy.__dict__, instance.__dict__)\n return instance\n\n\nclass Saving(LoggingMixin):\n \"\"\"\n Util class for saving instances.\n\n Methods\n ----------\n save_instance(instance, folder, log_method=None)\n Save instance to `folder`.\n * instance : object, instance to save.\n * folder : str, folder to save to.\n * log_method : {None, function}, used as `log_method` parameter in\n `log_with_external_method` method of `LoggingMixin`.\n\n load_instance(instance, folder, log_method=None)\n Load instance from `folder`.\n * instance : object, instance to load, need to be initialized.\n * folder : str, folder to load from.\n * log_method : {None, function}, used as `log_method` parameter in\n `log_with_external_method` method of `LoggingMixin`.\n\n \"\"\"\n\n delim = \"^_^\"\n dill_suffix = \".pkl\"\n array_sub_folder = \"__arrays\"\n\n @staticmethod\n def _check_core(elem):\n if isinstance(elem, dict):\n if not Saving._check_dict(elem):\n return False\n if isinstance(elem, (list, tuple)):\n if not Saving._check_list_and_tuple(elem):\n return False\n if not Saving._check_elem(elem):\n return False\n return True\n\n @staticmethod\n def _check_elem(elem):\n if isinstance(elem, (type, np.generic, np.ndarray)):\n return False\n if callable(elem):\n return False\n try:\n json.dumps({\"\": elem})\n return True\n except TypeError:\n return False\n\n @staticmethod\n def _check_list_and_tuple(arr: Union[list, tuple]):\n for elem in arr:\n if not Saving._check_core(elem):\n return False\n return True\n\n @staticmethod\n def _check_dict(d: dict):\n for v in d.values():\n if not Saving._check_core(v):\n return False\n return True\n\n @staticmethod\n def save_dict(d: dict, name: str, folder: str) -> str:\n if Saving._check_dict(d):\n kwargs = {}\n suffix, method, mode = \".json\", json.dump, \"w\"\n else:\n kwargs = {\"recurse\": True}\n suffix, method, mode = Saving.dill_suffix, dill.dump, \"wb\"\n file = os.path.join(folder, f\"{name}{suffix}\")\n with open(file, mode) as f:\n method(d, f, **kwargs)\n return os.path.abspath(file)\n\n @staticmethod\n def load_dict(name: str, folder: str = None):\n if folder is None:\n folder, name = os.path.split(name)\n name, suffix = os.path.splitext(name)\n if not suffix:\n json_file = os.path.join(folder, f\"{name}.json\")\n if os.path.isfile(json_file):\n with open(json_file, \"r\") as f:\n return json.load(f)\n dill_file = 
os.path.join(folder, f\"{name}{Saving.dill_suffix}\")\n if os.path.isfile(dill_file):\n with open(dill_file, \"rb\") as f:\n return dill.load(f)\n else:\n assert_msg = f\"suffix should be either 'json' or 'pkl', {suffix} found\"\n assert suffix in {\".json\", \".pkl\"}, assert_msg\n name = f\"{name}{suffix}\"\n file = os.path.join(folder, name)\n if os.path.isfile(file):\n if suffix == \".json\":\n mode, load_method = \"r\", json.load\n else:\n mode, load_method = \"rb\", dill.load\n with open(file, mode) as f:\n return load_method(f)\n raise ValueError(f\"config '{name}' is not found under '{folder}' folder\")\n\n @staticmethod\n def deep_copy_dict(d: dict):\n tmp_folder = os.path.join(os.getcwd(), \"___tmp_dict_cache___\")\n if os.path.isdir(tmp_folder):\n shutil.rmtree(tmp_folder)\n os.makedirs(tmp_folder)\n dict_name = \"deep_copy\"\n Saving.save_dict(d, dict_name, tmp_folder)\n loaded_dict = Saving.load_dict(dict_name, tmp_folder)\n shutil.rmtree(tmp_folder)\n return loaded_dict\n\n @staticmethod\n def get_cache_file(instance):\n return f\"{type(instance).__name__}.pkl\"\n\n @staticmethod\n def save_instance(instance, folder, log_method=None):\n instance_str = str(instance)\n Saving.log_with_external_method(\n f\"saving '{instance_str}' to '{folder}'\",\n Saving.info_prefix,\n log_method,\n 5,\n )\n\n def _record_array(k, v):\n extension = \".npy\" if isinstance(v, np.ndarray) else \".lst\"\n array_attribute_dict[f\"{k}{extension}\"] = v\n\n def _check_array(attr_key_, attr_value_, depth=0):\n if isinstance(attr_value_, dict):\n for k in list(attr_value_.keys()):\n v = attr_value_[k]\n extended_k = f\"{attr_key_}{delim}{k}\"\n if isinstance(v, dict):\n _check_array(extended_k, v, depth + 1)\n elif isinstance(v, array_types):\n _record_array(extended_k, v)\n attr_value_.pop(k)\n if isinstance(attr_value_, array_types):\n _record_array(attr_key_, attr_value_)\n if depth == 0:\n cache_excludes.add(attr_key_)\n\n main_file = Saving.get_cache_file(instance)\n instance_dict = shallow_copy_dict(instance.__dict__)\n verbose, cache_excludes = map(\n getattr,\n [instance] * 2,\n [\"lock_verbose\", \"cache_excludes\"],\n [False, set()],\n )\n if os.path.isdir(folder):\n if verbose:\n prefix = Saving.warning_prefix\n msg = f\"'{folder}' will be cleaned up when saving '{instance_str}'\"\n Saving.log_with_external_method(\n msg, prefix, log_method, msg_level=logging.WARNING\n )\n shutil.rmtree(folder)\n save_path = os.path.join(folder, main_file)\n array_folder = os.path.join(folder, Saving.array_sub_folder)\n tuple(\n map(\n lambda folder_: os.makedirs(folder_, exist_ok=True),\n [folder, array_folder],\n )\n )\n sorted_attributes, array_attribute_dict = sorted(instance_dict), {}\n delim, array_types = Saving.delim, (list, np.ndarray)\n for attr_key in sorted_attributes:\n if attr_key in cache_excludes:\n continue\n attr_value = instance_dict[attr_key]\n _check_array(attr_key, attr_value)\n cache_excludes.add(\"_verbose_level_\")\n with lock_manager(\n folder,\n [os.path.join(folder, main_file)],\n name=instance_str,\n ):\n with open(save_path, \"wb\") as f:\n d = {k: v for k, v in instance_dict.items() if k not in cache_excludes}\n dill.dump(d, f, recurse=True)\n if array_attribute_dict:\n sorted_array_files = sorted(array_attribute_dict)\n sorted_array_files_full_path = list(\n map(lambda f_: os.path.join(array_folder, f_), sorted_array_files)\n )\n with lock_manager(\n array_folder,\n sorted_array_files_full_path,\n name=f\"{instance_str} (arrays)\",\n ):\n for array_file, array_file_full_path 
in zip(\n sorted_array_files, sorted_array_files_full_path\n ):\n array_value = array_attribute_dict[array_file]\n if array_file.endswith(\".npy\"):\n np.save(array_file_full_path, array_value)\n elif array_file.endswith(\".lst\"):\n with open(array_file_full_path, \"wb\") as f:\n np.save(f, array_value)\n else:\n raise ValueError(\n f\"unrecognized file type '{array_file}' occurred\"\n )\n\n @staticmethod\n def load_instance(instance, folder, *, log_method=None, verbose=True):\n if verbose:\n Saving.log_with_external_method(\n f\"loading '{instance}' from '{folder}'\",\n Saving.info_prefix,\n log_method,\n 5,\n )\n with open(os.path.join(folder, Saving.get_cache_file(instance)), \"rb\") as f:\n instance.__dict__.update(dill.load(f))\n delim = Saving.delim\n array_folder = os.path.join(folder, Saving.array_sub_folder)\n for array_file in os.listdir(array_folder):\n attr_name, attr_ext = os.path.splitext(array_file)\n if attr_ext == \".npy\":\n load_method = partial(np.load, allow_pickle=True)\n elif attr_ext == \".lst\":\n\n def load_method(path):\n return np.load(path, allow_pickle=True).tolist()\n\n else:\n raise ValueError(f\"unrecognized file type '{array_file}' occurred\")\n array_value = load_method(os.path.join(array_folder, array_file))\n attr_hierarchy = attr_name.split(delim)\n if len(attr_hierarchy) == 1:\n instance.__dict__[attr_name] = array_value\n else:\n hierarchy_dict = instance.__dict__\n for attr in attr_hierarchy[:-1]:\n hierarchy_dict = hierarchy_dict.setdefault(attr, {})\n hierarchy_dict[attr_hierarchy[-1]] = array_value\n\n @staticmethod\n def prepare_folder(instance, folder):\n if os.path.isdir(folder):\n instance.log_msg(\n f\"'{folder}' already exists, it will be cleared up to save our model\",\n instance.warning_prefix,\n msg_level=logging.WARNING,\n )\n shutil.rmtree(folder)\n os.makedirs(folder)\n\n @staticmethod\n def compress(abs_folder, remove_original=True):\n shutil.make_archive(abs_folder, \"zip\", abs_folder)\n if remove_original:\n shutil.rmtree(abs_folder)\n\n @staticmethod\n def compress_loader(\n folder: str,\n is_compress: bool,\n *,\n remove_extracted: bool = True,\n logging_mixin: Optional[LoggingMixin] = None,\n ):\n class _manager(context_error_handler):\n def __enter__(self):\n if is_compress:\n if os.path.isdir(folder):\n msg = (\n f\"'{folder}' already exists, \"\n \"it will be cleared up to load our model\"\n )\n if logging_mixin is None:\n print(msg)\n else:\n logging_mixin.log_msg(\n msg,\n logging_mixin.warning_prefix,\n msg_level=logging.WARNING,\n )\n shutil.rmtree(folder)\n with zipfile.ZipFile(f\"{folder}.zip\", \"r\") as zip_ref:\n zip_ref.extractall(folder)\n\n def _normal_exit(self, exc_type, exc_val, exc_tb):\n if is_compress and remove_extracted:\n shutil.rmtree(folder)\n\n return _manager()\n\n\ncandidate_type = List[Any]\ncandidates_type = Union[List[candidate_type], Dict[str, candidate_type]]\n\n\nclass Grid:\n \"\"\"\n Util class provides permutation of simple, flattened candidates.\n * For permutation of complex, nested param dicts, please refers to\n `ParamGenerator` in `cftool.param_utils.core`.\n\n Parameters\n ----------\n candidates : candidates_type, cadidates we want to create grid from.\n\n Examples\n ----------\n >>> from cftool.misc import Grid\n >>>\n >>> grid = Grid({\"a\": [1, 2, 3], \"b\": [1, 2, 3]})\n >>> for param in grid:\n >>> print(param)\n >>> # output : {'a': 1, 'b': 1}, {'a': 1, 'b': 2}, {'a': 1, 'b': 3}\n >>> # {'a': 2, 'b': 1}, ..., {'a': 3, 'b': 3}\n\n \"\"\"\n\n def __init__(self, candidates: 
candidates_type):\n self.candidates = candidates\n self._is_list = isinstance(candidates, list)\n\n @staticmethod\n def _yield_lists(lists):\n yield from map(list, product(*lists))\n\n def __iter__(self):\n if self._is_list:\n yield from self._yield_lists(self.candidates)\n else:\n items = sorted(self.candidates.items())\n if not items:\n yield {}\n else:\n keys, values = zip(*items)\n for v in map(list, product(*values)):\n yield dict(zip(keys, v))\n\n\nnested_type = Dict[str, Union[Any, Dict[str, \"nested_type\"]]]\nall_nested_type = Dict[str, Union[List[Any], Dict[str, \"all_nested_type\"]]]\nunion_nested_type = Union[nested_type, all_nested_type]\nflattened_type = Dict[str, Any]\nall_flattened_type = Dict[str, List[Any]]\nunion_flattened_type = Union[flattened_type, all_flattened_type]\n\n\ndef _offset_fn(value) -> int:\n if not isinstance(value, (list, tuple)):\n return 1\n return len(value)\n\n\nclass Nested:\n def __init__(\n self,\n nested: union_nested_type,\n *,\n offset_fn: Callable[[Any], int] = _offset_fn,\n delim: str = \"^_^\",\n ):\n self.nested = nested\n self.offset_fn, self.delim = offset_fn, delim\n self._flattened = self._sorted_flattened_keys = None\n self._sorted_flattened_offsets = None\n\n def apply(self, fn: Callable[[Any], Any]) -> \"Nested\":\n def _apply(src, tgt):\n for k, v in src.items():\n if isinstance(v, dict):\n next_tgt = tgt.setdefault(k, {})\n _apply(v, next_tgt)\n else:\n tgt[k] = fn(v)\n return tgt\n\n return Nested(_apply(self.nested, {}))\n\n @property\n def flattened(self) -> union_flattened_type:\n if self._flattened is None:\n self._flattened = self.flatten_nested(self.nested)\n return self._flattened\n\n @property\n def sorted_flattened_keys(self) -> List[str]:\n if self._sorted_flattened_keys is None:\n self._sorted_flattened_keys = sorted(self.flattened)\n return self._sorted_flattened_keys\n\n @property\n def sorted_flattened_offsets(self) -> List[int]:\n if self._sorted_flattened_offsets is None:\n offsets = []\n for key in self.sorted_flattened_keys:\n value = self.get_value_from(key)\n offsets.append(self.offset_fn(value))\n self._sorted_flattened_offsets = offsets\n return self._sorted_flattened_offsets\n\n def get_value_from(self, flattened_key: str) -> Any:\n value = self.nested\n for sub_key in flattened_key.split(self.delim):\n value = value[sub_key]\n return value\n\n def flatten_nested(self, nested: nested_type) -> nested_type:\n flattened = []\n\n def _flatten(d, pre_key: Union[None, str]):\n for name, value in d.items():\n if pre_key is None:\n next_pre_key = name\n else:\n next_pre_key = f\"{pre_key}{self.delim}{name}\"\n if isinstance(value, dict):\n _flatten(value, next_pre_key)\n else:\n flattened.append((next_pre_key, value))\n return flattened\n\n return dict(_flatten(nested, None))\n\n def nest_flattened(self, flattened: flattened_type) -> nested_type:\n sorted_pairs = sorted(\n map(lambda k, v: (k.split(self.delim), v), *zip(*flattened.items())),\n key=len,\n )\n nested = {}\n for key_list, value in sorted_pairs:\n if len(key_list) == 1:\n nested[key_list[0]] = value\n else:\n parent = nested.setdefault(key_list[0], {})\n for key in key_list[1:-1]:\n parent = parent.setdefault(key, {})\n parent[key_list[-1]] = value\n return nested\n\n def flattened2array(self, flattened: flattened_type) -> np.ndarray:\n value_list = []\n for key in self.sorted_flattened_keys:\n value = flattened[key]\n value = list(value) if isinstance(value, (list, tuple)) else [value]\n value_list.extend(value)\n return np.array(value_list, 
np.float32)\n\n def array2flattened(self, array: np.ndarray) -> flattened_type:\n cursor = 0\n flattened = {}\n for key, offset in zip(\n self.sorted_flattened_keys,\n self.sorted_flattened_offsets,\n ):\n end = cursor + offset\n if offset == 1:\n value = array[cursor]\n else:\n value = array[cursor:end].tolist()\n if isinstance(value, tuple):\n value = tuple(value)\n flattened[key] = value\n cursor = end\n return flattened\n\n\nclass Sampler:\n \"\"\"\n Util class which can help sampling indices from probabilities.\n\n Parameters\n ----------\n method : str, sampling method.\n * Currently only 'multinomial' is supported.\n probabilities : np.ndarray, probabilities we'll use.\n\n Examples\n ----------\n >>> import numpy as np\n >>> arr = [[0.1, 0.2, 0.3, 0.4], [0.4, 0.3, 0.2, 0.1]]\n >>> probabilities = np.array(arr, np.float32)\n >>> sampler = Sampler(\"multinomial\", probabilities)\n >>> print(sampler.sample(10))\n\n \"\"\"\n\n def __init__(self, method: str, probabilities: np.ndarray):\n self.method = method\n self.p = probabilities\n self._p_shape = list(self.p.shape)\n if self.is_flat:\n self._p_block = self.p\n else:\n self._p_block = self.p.reshape([-1, self._p_shape[-1]])\n\n def __str__(self):\n return f\"Sampler({self.method})\"\n\n __repr__ = __str__\n\n @property\n def is_flat(self):\n return len(self._p_shape) == 1\n\n def _reshape(self, n: int, samples: np.ndarray) -> np.ndarray:\n return samples.reshape([n] + self._p_shape[:-1]).astype(np.int64)\n\n def sample(self, n: int) -> np.ndarray:\n return getattr(self, self.method)(n)\n\n @staticmethod\n def _multinomial_flat(n: int, p: np.ndarray) -> np.ndarray:\n samples = np.random.multinomial(n, p)\n return np.repeat(np.arange(len(p)), samples)\n\n def multinomial(self, n: int) -> np.ndarray:\n if self.is_flat:\n sampled_indices = self._multinomial_flat(n, self.p)\n else:\n stacks = [self._multinomial_flat(n, p) for p in self._p_block]\n sampled_indices = np.vstack(stacks).T\n return self._reshape(n, sampled_indices)\n\n\n# contexts\n\n\nclass context_error_handler:\n \"\"\" Util class which provides exception handling when using context manager. 
\"\"\"\n\n @property\n def exception_suffix(self):\n return \"\"\n\n def _normal_exit(self, exc_type, exc_val, exc_tb):\n pass\n\n def _exception_exit(self, exc_type, exc_val, exc_tb):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if not exc_type:\n self._normal_exit(exc_type, exc_val, exc_tb)\n else:\n self._exception_exit(exc_type, exc_val, exc_tb)\n\n\nclass timeit(context_error_handler):\n \"\"\"\n Timing context manager.\n\n Examples\n --------\n >>> with timeit(\"something\"):\n >>> # do something here\n >>> # will print \"> [ info ] timing for something : x.xxxx\"\n\n \"\"\"\n\n def __init__(self, msg, precision=6):\n self._msg = msg\n self._p = precision\n\n def __enter__(self):\n self._t = time.time()\n\n def _normal_exit(self, exc_type, exc_val, exc_tb):\n prefix = LoggingMixin.info_prefix\n print(\n f\"{prefix}timing for {self._msg:^16s} : \"\n f\"{time.time() - self._t:{self._p}.{self._p-2}f}\"\n )\n\n\nclass _lock_file_refresher(threading.Thread):\n def __init__(self, lock_file, delay=1, refresh=0.01):\n super().__init__()\n self.__stop_event = threading.Event()\n self._lock_file, self._delay, self._refresh = lock_file, delay, refresh\n with open(lock_file, \"r\") as f:\n self._lock_file_contents = f.read()\n\n def run(self) -> None:\n counter = 0\n while True:\n counter += 1\n time.sleep(self._refresh)\n if counter * self._refresh >= self._delay:\n counter = 0\n with open(self._lock_file, \"w\") as f:\n prefix = \"\\n\\n\"\n add_line = f\"{prefix}refreshed at {timestamp()}\"\n f.write(self._lock_file_contents + add_line)\n if self.__stop_event.is_set():\n break\n\n def stop(self):\n self.__stop_event.set()\n\n\nclass lock_manager(context_error_handler, LoggingMixin):\n \"\"\"\n Util class to make simultaneously-write process safe with some\n hacked (ugly) tricks.\n\n Examples\n --------\n >>> import dill\n >>> workplace = \"_cache\"\n >>> target_write_files_full_path = [\n >>> os.path.join(workplace, \"file1.pkl\"),\n >>> os.path.join(workplace, \"file2.pkl\")\n >>> ]\n >>> with lock_manager(workplace, target_write_files_full_path):\n >>> for write_file_full_path in target_write_files_full_path:\n >>> with open(write_file_full_path, \"wb\") as wf:\n >>> dill.dump(..., wf)\n\n \"\"\"\n\n delay = 0.01\n __lock__ = \"__lock__\"\n\n def __init__(\n self,\n workplace,\n stuffs,\n verbose_level=None,\n set_lock=True,\n clear_stuffs_after_exc=True,\n name=None,\n wait=1000,\n ):\n self._workplace = workplace\n self._verbose_level = verbose_level\n self._name, self._wait = name, wait\n os.makedirs(workplace, exist_ok=True)\n self._stuffs, self._set_lock = stuffs, set_lock\n self._clear_stuffs = clear_stuffs_after_exc\n self._is_locked = False\n\n def __enter__(self):\n frame = inspect.currentframe().f_back\n self.log_msg(\n f\"waiting for lock at {self.lock_file}\",\n self.info_prefix,\n 5,\n logging.DEBUG,\n frame,\n )\n enter_time = file_modify = None\n while True:\n try:\n fd = os.open(self.lock_file, os.O_CREAT | os.O_EXCL | os.O_RDWR)\n self.log_msg(\n \"lock acquired\",\n self.info_prefix,\n 5,\n logging.DEBUG,\n frame,\n )\n if not self._set_lock:\n self.log_msg(\n \"releasing lock since set_lock=False\",\n self.info_prefix,\n 5,\n logging.DEBUG,\n frame,\n )\n os.unlink(self.lock_file)\n self.__refresher = None\n else:\n self.log_msg(\n \"writing info to lock file\",\n self.info_prefix,\n 5,\n logging.DEBUG,\n frame,\n )\n with os.fdopen(fd, \"a\") as f:\n f.write(\n f\"name : {self._name}\\n\"\n f\"timestamp : {timestamp()}\\n\"\n f\"workplace : 
{self._workplace}\\n\"\n f\"stuffs :\\n{self.cache_stuffs_str}\"\n )\n self.__refresher = _lock_file_refresher(self.lock_file)\n self.__refresher.start()\n break\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n try:\n if file_modify is None:\n enter_time = time.time()\n file_modify = os.path.getmtime(self.lock_file)\n else:\n new_file_modify = os.path.getmtime(self.lock_file)\n if new_file_modify != file_modify:\n enter_time = time.time()\n file_modify = new_file_modify\n else:\n wait_time = time.time() - enter_time\n if wait_time >= self._wait:\n raise ValueError(\n f\"'{self.lock_file}' has been waited \"\n f\"for too long ({wait_time})\"\n )\n time.sleep(random.random() * self.delay + self.delay)\n except ValueError:\n msg = f\"lock_manager was blocked by dead lock ({self.lock_file})\"\n self.exception(msg)\n raise\n except FileNotFoundError:\n pass\n self.log_block_msg(\n self.cache_stuffs_str,\n title=\"start processing following stuffs:\",\n verbose_level=5,\n msg_level=logging.DEBUG,\n frame=frame,\n )\n self._is_locked = True\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n if self.__refresher is not None:\n self.__refresher.stop()\n self.__refresher.join()\n if self._set_lock:\n super().__exit__(exc_type, exc_val, exc_tb)\n\n def _normal_exit(self, exc_type, exc_val, exc_tb, frame=None):\n if self._set_lock:\n os.unlink(self.lock_file)\n if frame is None:\n frame = inspect.currentframe().f_back.f_back.f_back\n self.log_msg(\"lock released\", self.info_prefix, 5, logging.DEBUG, frame)\n\n def _exception_exit(self, exc_type, exc_val, exc_tb):\n frame = inspect.currentframe().f_back.f_back.f_back\n if self._clear_stuffs:\n for stuff in self._stuffs:\n if os.path.isfile(stuff):\n self.log_msg(\n f\"clearing cached file: {stuff}\",\n \">> \",\n 5,\n logging.ERROR,\n frame,\n )\n os.remove(stuff)\n elif os.path.isdir(stuff):\n self.log_msg(\n f\"clearing cached directory: {stuff}\",\n \">> \",\n 5,\n logging.ERROR,\n frame,\n )\n shutil.rmtree(stuff)\n self._normal_exit(exc_type, exc_val, exc_tb, frame)\n\n @property\n def locked(self):\n return self._is_locked\n\n @property\n def available(self):\n return not os.path.isfile(self.lock_file)\n\n @property\n def cache_stuffs_str(self):\n return \"\\n\".join([f\">> {stuff}\" for stuff in self._stuffs])\n\n @property\n def exception_suffix(self):\n return f\", clearing caches for safety{self.logging_suffix}\"\n\n @property\n def lock_file(self):\n return os.path.join(self._workplace, self.__lock__)\n\n @property\n def logging_suffix(self):\n return \"\" if self._name is None else f\" - {self._name}\"\n\n\nclass batch_manager(context_error_handler):\n \"\"\"\n Process data in batch.\n\n Parameters\n ----------\n inputs : tuple(np.ndarray), auxiliary array inputs.\n n_elem : {int, float}, indicates how many elements will be processed in a batch.\n batch_size : int, indicates the batch_size; if None, batch_size will be\n calculated by `n_elem`.\n\n Examples\n --------\n >>> with batch_manager(np.arange(5), np.arange(1, 6), batch_size=2) as manager:\n >>> for arr, tensor in manager:\n >>> print(arr, tensor)\n >>> # Will print:\n >>> # [0 1], [1 2]\n >>> # [2 3], [3 4]\n >>> # [4] , [5]\n\n \"\"\"\n\n def __init__(\n self,\n *inputs,\n n_elem: int = 1e6,\n batch_size: Optional[int] = None,\n max_batch_size: int = 1024,\n ):\n if not inputs:\n raise ValueError(\"inputs should be provided in general_batch_manager\")\n input_lengths = list(map(len, inputs))\n self._n, self._inputs = input_lengths[0], inputs\n 
assert_msg = \"inputs should be of same length\"\n assert all(length == self._n for length in input_lengths), assert_msg\n if batch_size is not None:\n self._batch_size = batch_size\n else:\n n_elem = int(n_elem)\n self._batch_size = int(\n n_elem / sum(map(lambda arr: prod(arr.shape[1:]), inputs))\n )\n self._batch_size = min(max_batch_size, min(self._n, self._batch_size))\n self._n_epoch = int(self._n / self._batch_size)\n self._n_epoch += int(self._n_epoch * self._batch_size < self._n)\n\n def __enter__(self):\n return self\n\n def __iter__(self):\n self._start, self._end = 0, self._batch_size\n return self\n\n def __next__(self):\n if self._start >= self._n:\n raise StopIteration\n batched_data = tuple(\n map(\n lambda arr: arr[self._start : self._end],\n self._inputs,\n )\n )\n self._start, self._end = self._end, self._end + self._batch_size\n if len(batched_data) == 1:\n return batched_data[0]\n return batched_data\n\n def __len__(self):\n return self._n_epoch\n\n\nclass timing_context(context_error_handler):\n \"\"\"\n Wrap codes in any base class of `LoggingMixin` with this timing context to timeit.\n\n Parameters\n ----------\n logging_mixin : LoggingMixin, arbitrary base classes of LoggingMixin.\n name : str, explain what the wrapped codes are doing.\n enable : bool, whether enable this `timing_context`.\n\n Examples\n --------\n >>> import time\n >>> import random\n >>> instance = type(\n >>> \"test\", (LoggingMixin,),\n >>> {\"config\": {}, \"_verbose_level\": 2}\n >>> )()._init_logging(2, True)\n >>> for _ in range(50):\n >>> with timing_context(instance, \"random sleep\"):\n >>> time.sleep(random.random() * 0.1)\n >>> instance.log_timing()\n\n \"\"\"\n\n def __init__(self, logging_mixin: LoggingMixin, name: str, *, enable: bool = True):\n self._cls, self._name = logging_mixin, name\n self._enable = enable\n\n @property\n def timer_name(self):\n return f\"[{type(self._cls).__name__:^24s}] {self._name}\"\n\n def __enter__(self):\n if self._enable:\n self._cls.start_timer(self.timer_name)\n\n def _normal_exit(self, exc_type, exc_val, exc_tb):\n if self._enable:\n self._cls.end_timer(self.timer_name)\n\n\nclass data_tuple_saving_controller(context_error_handler):\n \"\"\"\n Help saving DataTuple of SavingMixin.\n\n Parameters\n ----------\n instance : SavingMixin, instance whose DataTuples are to be saved / loaded.\n is_saving : bool, whether it is a saving context or not.\n \"\"\"\n\n __prefix__ = \"_Data_Tuple__\"\n\n def __init__(self, instance: SavingMixin, *, is_saving: bool):\n self._instance = instance\n self._is_saving = is_saving\n self._data_tuple_base = instance.data_tuple_base\n self._data_tuple_attributes = instance.data_tuple_attributes\n if self.trigger and is_saving:\n self._data_tuples: List[NamedTuple] = [\n instance.__dict__.pop(attr) for attr in self._data_tuple_attributes\n ]\n\n def __enter__(self):\n if self.trigger and self._is_saving:\n self.__tmp_attr_list = []\n for attr, data_tuple in zip(\n self._data_tuple_attributes,\n self._data_tuples,\n ):\n local_attr_list = self._get_attr(attr, data_tuple)\n for local_attr, data in zip(local_attr_list, data_tuple):\n setattr(self._instance, local_attr, data)\n self.__tmp_attr_list += local_attr_list\n\n @property\n def trigger(self):\n return (\n self._data_tuple_base is not None\n and self._data_tuple_attributes is not None\n )\n\n def _normal_exit(self, exc_type, exc_val, exc_tb):\n if self.trigger:\n if self._is_saving:\n for attr, data_tuple in zip(\n self._data_tuple_attributes,\n self._data_tuples,\n ):\n 
setattr(self._instance, attr, self._data_tuple_base(*data_tuple))\n for attr in self.__tmp_attr_list:\n self._instance.__dict__.pop(attr)\n del self._data_tuples\n else:\n attr_pool_map = self._get_attr()\n for core_attr, attr_dict in attr_pool_map.items():\n local_attr_values = []\n for idx in range(len(attr_dict)):\n local_attr = attr_dict[idx]\n local_attr_values.append(\n self._instance.__dict__.pop(local_attr)\n )\n setattr(\n self._instance,\n core_attr,\n self._data_tuple_base(*local_attr_values),\n )\n\n def _get_attr(\n self,\n attr: Optional[str] = None,\n data_tuple: Optional[NamedTuple] = None,\n ) -> Optional[Union[List[str], Dict[str, Dict[int, str]]]]:\n prefix = self.__prefix__\n if self._is_saving:\n if data_tuple is None:\n raise ValueError(\"data tuple should be provided in saving context\")\n assert isinstance(attr, str), \"attr should be string in saving context\"\n return [f\"{prefix}{attr}_{i}\" for i in range(len(data_tuple))]\n attr_pool = list(\n filter(\n lambda attr_: attr_.startswith(prefix),\n self._instance.__dict__.keys(),\n )\n )\n attr_pool_split = [attr_[len(prefix) :].split(\"_\") for attr_ in attr_pool]\n attr_pool_map = {}\n for attr, attr_split in zip(attr_pool, attr_pool_split):\n core_attr, idx = \"_\".join(attr_split[:-1]), int(attr_split[-1])\n local_map = attr_pool_map.setdefault(core_attr, {})\n local_map[idx] = attr\n loaded_attr_list = sorted(attr_pool_map)\n preset_attr_list = sorted(self._data_tuple_attributes)\n assert_msg = (\n f\"loaded attributes ({loaded_attr_list}) \"\n f\"are not identical with preset attributes ({preset_attr_list})\"\n )\n assert loaded_attr_list == preset_attr_list, assert_msg\n return attr_pool_map\n\n\n__all__ = [\n \"timestamp\",\n \"prod\",\n \"hash_code\",\n \"prefix_dict\",\n \"shallow_copy_dict\",\n \"update_dict\",\n \"fix_float_to_length\",\n \"truncate_string_to_length\",\n \"grouped\",\n \"grouped_into\",\n \"is_numeric\",\n \"get_one_hot\",\n \"show_or_save\",\n \"show_or_return\",\n \"get_indices_from_another\",\n \"UniqueIndices\",\n \"get_unique_indices\",\n \"get_counter_from_arr\",\n \"allclose\",\n \"register_core\",\n \"StrideArray\",\n \"Incrementer\",\n \"LoggingMixin\",\n \"PureLoggingMixin\",\n \"SavingMixin\",\n \"Saving\",\n \"Grid\",\n \"Sampler\",\n \"context_error_handler\",\n \"timeit\",\n \"lock_manager\",\n \"batch_manager\",\n \"timing_context\",\n \"data_tuple_saving_controller\",\n \"nested_type\",\n \"all_nested_type\",\n \"union_nested_type\",\n \"flattened_type\",\n \"all_flattened_type\",\n \"union_flattened_type\",\n \"Nested\",\n \"check\",\n \"SanityChecker\",\n]\n"
] | [
[
"numpy.split",
"numpy.allclose",
"numpy.unique",
"numpy.asarray",
"numpy.load",
"numpy.cumsum",
"matplotlib.pyplot.savefig",
"numpy.save",
"numpy.lib.stride_tricks.as_strided",
"numpy.random.multinomial",
"matplotlib.pyplot.close",
"numpy.searchsorted",
"numpy.argsort",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
liangstein/ByteNet-Keras | [
"e6722a69a316411fa563011a8ed37b0575d5a2db"
] | [
"ByteNet_train.py"
] | [
"import os;\nimport numpy as np;\nfrom keras.models import Model;\nfrom keras.layers.embeddings import Embedding;\nfrom keras.models import Sequential,load_model;\nfrom keras.optimizers import rmsprop,adam,adagrad,SGD;\nfrom keras.callbacks import EarlyStopping,ModelCheckpoint,ReduceLROnPlateau;\nfrom keras.preprocessing.text import text_to_word_sequence,one_hot,Tokenizer;\nfrom keras.layers import Input,Dense,merge,Dropout,BatchNormalization,Activation,Conv1D;\n\n# setting current working directory\nWKDIR=os.getcwd();\n\ndef load_dataset(batch_size,N=150000):\n French = list(np.load(WKDIR + \"/french_sentences.npy\")[:N]);# read dataset\n English = list(np.load(WKDIR + \"/english_sentences.npy\")[:N]);\n English = [i + \"\\n\" for i in English];# add ending signal at the sequence end\n while 1:\n if len(English) % batch_size != 0:\n del English[-1];\n del French[-1];\n else:\n break;\n return French,English;\n\ndef build_vacabulary(French,English):\n all_eng_words = [];\n all_french_words = [];\n for i in np.arange(0, len(French)):\n all_eng_words.append(English[i]);\n all_french_words.append(French[i]);\n tokeng = Tokenizer(char_level=True);\n tokeng.fit_on_texts(all_eng_words);\n eng_index = tokeng.word_index; # build character to index dictionary\n index_eng = dict((eng_index[i], i) for i in eng_index);\n tokita = Tokenizer(char_level=True);\n tokita.fit_on_texts(all_french_words);\n french_index = tokita.word_index; # build character to index dictionary\n index_french = dict((french_index[i], i) for i in french_index);\n return (eng_index,french_index,index_eng,index_french);\n\n# convert a batch of input sequences to tensors\ndef generate_batch_data(English,French,eng_index,french_index,batch_size):\n while 1:\n all_labels=np.arange(0,len(French));np.random.shuffle(all_labels);\n batch_labels=np.array_split(all_labels,int(len(French)*batch_size**-1));\n for labels in batch_labels:\n source_vec=np.zeros((batch_size,maxlen+1),dtype=np.uint16);\n target0_vec=np.zeros((batch_size,maxlen),dtype=np.uint16);\n target1_vec = np.zeros((batch_size, maxlen+1, len(eng_index)), dtype=np.uint16);\n sampleweights=np.zeros((batch_size,maxlen+1),dtype=np.uint16);\n for i,a in enumerate(labels):\n for j1,ele1 in enumerate(French[a]):\n source_vec[i,j1]=french_index[ele1];\n for j2,ele2 in enumerate(English[a][:-1]):\n target0_vec[i,j2]=eng_index[ele2];\n for j3,ele3 in enumerate(English[a]):\n target1_vec[i,j3,eng_index[ele3]-1]=1;\n sampleweights[i,j3]=1;# mask the loss function\n t0=np.zeros((batch_size,1,500),dtype=np.uint8);# beginning of target sequence\n yield ([source_vec,target0_vec,t0],target1_vec,sampleweights);\n\ndef build_model(french_index,eng_index,index_french,index_eng,English,French):\n input_sequence = Input(shape=(maxlen + 1,));\n input_tensor = Embedding(input_length=maxlen + 1, input_dim=len(french_index) + 1, output_dim=500)(input_sequence);\n encoder1 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(input_tensor);\n encoder1 = Activation(\"relu\")(encoder1);\n encoder1 = Conv1D(filters=250, kernel_size=5, strides=1, padding=\"same\", dilation_rate=1)(encoder1);\n encoder1 = BatchNormalization(axis=-1)(encoder1);\n encoder1 = Activation(\"relu\")(encoder1);\n encoder1 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(encoder1);\n input_tensor = merge([input_tensor, encoder1], mode=\"sum\");\n encoder2 = BatchNormalization(axis=-1)(input_tensor);\n encoder2 = Activation(\"relu\")(encoder2);\n encoder2 = Conv1D(filters=500, kernel_size=1, 
strides=1, padding=\"same\")(input_tensor);\n encoder2 = BatchNormalization(axis=-1)(encoder2);\n encoder2 = Activation(\"relu\")(encoder2);\n encoder2 = Conv1D(filters=250, kernel_size=5, strides=1, padding=\"same\", dilation_rate=2)(encoder2);\n encoder2 = BatchNormalization(axis=-1)(encoder2);\n encoder2 = Activation(\"relu\")(encoder2);\n encoder2 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(encoder2);\n input_tensor = merge([input_tensor, encoder2], mode=\"sum\");\n encoder3 = BatchNormalization(axis=-1)(input_tensor);\n encoder3 = Activation(\"relu\")(encoder3);\n encoder3 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(encoder3);\n encoder3 = BatchNormalization(axis=-1)(encoder3);\n encoder3 = Activation(\"relu\")(encoder3);\n encoder3 = Conv1D(filters=250, kernel_size=5, strides=1, padding=\"same\", dilation_rate=4)(encoder3);\n encoder3 = BatchNormalization(axis=-1)(encoder3);\n encoder3 = Activation(\"relu\")(encoder3);\n encoder3 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(encoder3);\n input_tensor = merge([input_tensor, encoder3], mode=\"sum\");\n encoder4 = BatchNormalization(axis=-1)(input_tensor);\n encoder4 = Activation(\"relu\")(encoder4);\n encoder4 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(encoder4);\n encoder4 = BatchNormalization(axis=-1)(encoder4);\n encoder4 = Activation(\"relu\")(encoder4);\n encoder4 = Conv1D(filters=250, kernel_size=5, strides=1, padding=\"same\", dilation_rate=8)(encoder4);\n encoder4 = BatchNormalization(axis=-1)(encoder4);\n encoder4 = Activation(\"relu\")(encoder4);\n encoder4 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(encoder4);\n input_tensor = merge([input_tensor, encoder4], mode=\"sum\");\n encoder5 = BatchNormalization(axis=-1)(input_tensor);\n encoder5 = Activation(\"relu\")(encoder5);\n encoder5 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(encoder5);\n encoder5 = BatchNormalization(axis=-1)(encoder5);\n encoder5 = Activation(\"relu\")(encoder5);\n encoder5 = Conv1D(filters=250, kernel_size=5, strides=1, padding=\"same\", dilation_rate=16)(encoder5);\n encoder5 = BatchNormalization(axis=-1)(encoder5);\n encoder5 = Activation(\"relu\")(encoder5);\n encoder5 = Conv1D(filters=500, kernel_size=1, strides=1, padding=\"same\")(encoder5);\n input_tensor = merge([input_tensor, encoder5], mode=\"sum\");\n input_tensor = Activation(\"relu\")(input_tensor);\n input_tensor = Conv1D(filters=500, kernel_size=1, padding=\"same\", activation=\"relu\")(input_tensor);\n target_sequence = Input(shape=(maxlen,));\n t0 = Input(shape=(1, 500));\n target_input = Embedding(input_length=maxlen, input_dim=len(eng_index) + 1, output_dim=500)(target_sequence);\n target_input = merge([t0, target_input], concat_axis=1, mode=\"concat\");\n input_to_decoder_sequence = merge([input_tensor, target_input], concat_axis=-1, mode=\"concat\");\n decoder1 = Conv1D(filters=1000, kernel_size=1, padding=\"same\")(input_to_decoder_sequence);\n decoder1 = BatchNormalization(axis=-1)(decoder1);\n decoder1 = Activation(\"relu\")(decoder1);\n decoder1 = Conv1D(filters=500, kernel_size=3, padding=\"causal\", dilation_rate=1)(decoder1);\n decoder1 = BatchNormalization(axis=-1)(decoder1);\n decoder1 = Activation(\"relu\")(decoder1);\n decoder1 = Conv1D(filters=1000, kernel_size=1, padding=\"same\")(decoder1);\n output_tensor = merge([input_to_decoder_sequence, decoder1], mode=\"sum\");\n decoder2 = BatchNormalization(axis=-1)(output_tensor);\n decoder2 = 
Activation(\"relu\")(decoder2);\n decoder2 = Conv1D(filters=1000, kernel_size=1, strides=1, padding=\"same\")(decoder2);\n decoder2 = BatchNormalization(axis=-1)(decoder2);\n decoder2 = Activation(\"relu\")(decoder2);\n decoder2 = Conv1D(filters=500, kernel_size=3, padding=\"causal\", dilation_rate=2)(decoder2);\n decoder2 = BatchNormalization(axis=-1)(decoder2);\n decoder2 = Activation(\"relu\")(decoder2);\n decoder2 = Conv1D(filters=1000, kernel_size=1, padding=\"same\")(decoder2);\n output_tensor = merge([output_tensor, decoder2], mode=\"sum\");\n decoder3 = BatchNormalization(axis=-1)(output_tensor);\n decoder3 = Activation(\"relu\")(decoder3);\n decoder3 = Conv1D(filters=1000, kernel_size=1, strides=1, padding=\"same\")(decoder3);\n decoder3 = BatchNormalization(axis=-1)(decoder3);\n decoder3 = Activation(\"relu\")(decoder3);\n decoder3 = Conv1D(filters=500, kernel_size=3, padding=\"causal\", dilation_rate=4)(decoder3);\n decoder3 = BatchNormalization(axis=-1)(decoder3);\n decoder3 = Activation(\"relu\")(decoder3);\n decoder3 = Conv1D(filters=1000, kernel_size=1, padding=\"same\")(decoder3);\n output_tensor = merge([output_tensor, decoder3], mode=\"sum\");\n decoder4 = BatchNormalization(axis=-1)(output_tensor);\n decoder4 = Activation(\"relu\")(decoder4);\n decoder4 = Conv1D(filters=1000, kernel_size=1, strides=1, padding=\"same\")(decoder4);\n decoder4 = BatchNormalization(axis=-1)(decoder4);\n decoder4 = Activation(\"relu\")(decoder4);\n decoder4 = Conv1D(filters=500, kernel_size=3, padding=\"causal\", dilation_rate=8)(decoder4);\n decoder4 = BatchNormalization(axis=-1)(decoder4);\n decoder4 = Activation(\"relu\")(decoder4);\n decoder4 = Conv1D(filters=1000, kernel_size=1, padding=\"same\")(decoder4);\n output_tensor = merge([output_tensor, decoder4], mode=\"sum\");\n decoder5 = BatchNormalization(axis=-1)(output_tensor);\n decoder5 = Activation(\"relu\")(decoder5);\n decoder5 = Conv1D(filters=1000, kernel_size=1, strides=1, padding=\"same\")(decoder5);\n decoder5 = BatchNormalization(axis=-1)(decoder5);\n decoder5 = Activation(\"relu\")(decoder5);\n decoder5 = Conv1D(filters=500, kernel_size=3, padding=\"causal\", dilation_rate=16)(decoder5);\n decoder5 = BatchNormalization(axis=-1)(decoder5);\n decoder5 = Activation(\"relu\")(decoder5);\n decoder5 = Conv1D(filters=1000, kernel_size=1, padding=\"same\")(decoder5);\n output_tensor = merge([output_tensor, decoder5], mode=\"sum\");\n output_tensor = Activation(\"relu\")(output_tensor);\n # decoder=Dropout(0.1)(decoder);\n result = Conv1D(filters=len(eng_index), kernel_size=1, padding=\"same\", activation=\"softmax\")(output_tensor);\n model = Model(inputs=[input_sequence, target_sequence, t0], outputs=result);\n opt = adam(lr=0.0003); # as in the paper, we choose adam optimizer with lr=0.0003\n model.compile(loss='categorical_crossentropy', optimizer=\"adam\", metrics=['categorical_accuracy'],\n sample_weight_mode=\"temporal\");\n return model;\n\ndef train(batch_size,epochs,maxlen):\n French,English=load_dataset(batch_size);\n eng_index, french_index, index_eng, index_french=build_vacabulary(French,English);\n model=build_model(french_index,eng_index,index_french,index_eng,English,French);\n early = EarlyStopping(monitor=\"loss\", mode=\"min\", patience=10);\n lr_change = ReduceLROnPlateau(monitor=\"loss\", factor=0.2, patience=0, min_lr=0.000)\n checkpoint = ModelCheckpoint(filepath=WKDIR + \"/conv1d_french_eng\",\n save_best_only=False);# checkpoint the model after each epoch\n # start training !\n 
model.fit_generator(generate_batch_data(English,French,eng_index,french_index,batch_size),\n steps_per_epoch=int(len(English) * batch_size ** -1),\n nb_epoch=epochs, workers=1, callbacks=[early, checkpoint, lr_change], initial_epoch=0);\n model.save(WKDIR + \"/conv1d_french_eng.h5\")# where the model is saved\n\nif __name__==\"__main__\":\n batch_size = 50;\n maxlen = 201;\n epochs=1000\n train(batch_size,epochs,maxlen);# run baby run !"
] | [
[
"numpy.load",
"numpy.zeros",
"numpy.random.shuffle"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NimaVahdat/Bio-Inspired-Computation_algorithms | [
"3b04b6f7187f15301f6cf05cb3de2ab0df820ee0"
] | [
"Memetic/MemTSP.py"
] | [
"import random\r\nfrom numpy import vectorize\r\n\r\nclass MemAlgo():\r\n \r\n def __init__(self, hash_map, start, steps = 2, crossover_prob = 0.15, \r\n mutation_prob = 0.15, population_size = 5, iterates = 100, mempop = 5):\r\n self.crossover_prob=crossover_prob\r\n self.mutation_prob=mutation_prob\r\n self.population_size=population_size\r\n self.hash_map = hash_map\r\n self.steps = steps\r\n self.iterates = iterates\r\n self.start = start\r\n self.cities = [k for k in self.hash_map.keys()] \r\n self.cities.remove(start)\r\n self.genes = []\r\n self.epsilon = 1 - 1 / self.iterates \r\n self.generate_G = vectorize(self.generate_G)\r\n # self.evaluate_fit = vectorize(self.evaluate_fit)\r\n self.evolve = vectorize(self.evolve)\r\n self.prune_genes = vectorize(self.prune_genes)\r\n self.converge = vectorize(self.converge)\r\n \r\n # Generating first population\r\n self.generate_G()\r\n\r\n self.mempop = mempop\r\n\r\n def selectMemPop(self):\r\n allo = self.genes[:]\r\n ind = []\r\n for i in range(self.mempop):\r\n want = random.choice(allo)\r\n idx = self.genes.index(want)\r\n del allo[allo.index(want)]\r\n ind.append(idx)\r\n return ind\r\n \r\n def LocalSearch(self, mm):\r\n al = [mm]\r\n for i in range(1, len(mm) - 2):\r\n for j in range(i+1, len(mm) - 1):\r\n al.append(mm[:i] + [mm[j]] + mm[i+1:j] + [mm[i]] + mm[j+1:])\r\n \r\n point = self.evaluate_fit(al)\r\n return al[point.index(max(point))]\r\n\r\n\r\n # Gene generating function \r\n def generate_G(self):\r\n for i in range(self.population_size):\r\n gene = [self.start]\r\n other_c = [k for k in self.cities]\r\n while len(gene) < len(self.cities) + 1:\r\n city = random.choice(other_c)\r\n gene.append(city)\r\n del other_c[other_c.index(city)]\r\n gene.append(self.start)\r\n self.genes.append(gene)\r\n return self.genes\r\n \r\n # Fitness function\r\n def evaluate_fit(self, genes):\r\n fit_point = []\r\n for gene in genes:\r\n total_distance = 0\r\n for idx in range(1,len(gene)):\r\n city_b = gene[idx]\r\n city_a = gene[idx-1]\r\n try:\r\n dist = self.hash_map[city_a][city_b]\r\n except:\r\n dist = self.hash_map[city_b][city_a]\r\n total_distance += int(dist)\r\n fitness = 1 / total_distance\r\n fit_point.append(fitness)\r\n return fit_point\r\n \r\n \r\n def evolve(self):\r\n index_map = {i:'' for i in range(1,len(self.cities)+1)}\r\n indices = [i for i in range(1,len(self.cities)+1)]\r\n to_visit = [c for c in self.cities]\r\n cross = (1 - self.epsilon) * self.crossover_prob\r\n mutate = self.epsilon * self.mutation_prob \r\n crossed_count = int(cross * len(self.cities))\r\n mutated_count = int((mutate * len(self.cities))/2)\r\n for idx in range(len(self.genes)):\r\n gene = self.genes[idx]\r\n for i in range(crossed_count):\r\n try:\r\n gene_index = random.choice(indices)\r\n sample = gene[gene_index]\r\n if sample in to_visit:\r\n index_map[gene_index] = sample\r\n loc = indices.index(gene_index)\r\n del indices[loc]\r\n loc = to_visit.index(sample)\r\n del to_visit[loc]\r\n else:\r\n continue\r\n except:\r\n pass\r\n last_gene = self.genes[-1]\r\n remaining_cities = [c for c in last_gene if c in to_visit]\r\n for k,v in index_map.items():\r\n if v != '':\r\n continue\r\n else:\r\n city = remaining_cities.pop(0)\r\n index_map[k] = city\r\n new_gene = [index_map[i] for i in range(1, len(self.cities)+1)]\r\n new_gene.insert(0, self.start)\r\n new_gene.append(self.start)\r\n for i in range(mutated_count):\r\n choices = [c for c in new_gene if c != self.start]\r\n city_a = random.choice(choices)\r\n city_b = 
random.choice(choices)\r\n index_a = new_gene.index(city_a)\r\n index_b = new_gene.index(city_b)\r\n new_gene[index_a] = city_b\r\n new_gene[index_b] = city_a\r\n \r\n self.genes.append(new_gene)\r\n \r\n def prune_genes(self): \r\n for i in range(self.steps):\r\n self.evolve()\r\n \r\n memPop = self.selectMemPop()\r\n for m in memPop:\r\n that = self.LocalSearch(self.genes[m])\r\n self.genes[m] = that\r\n \r\n fit_point = self.evaluate_fit(self.genes)\r\n for i in range(self.steps):\r\n worst_gene_index = fit_point.index(min(fit_point))\r\n del self.genes[worst_gene_index]\r\n del fit_point[worst_gene_index]\r\n return max(fit_point), self.genes[fit_point.index(max(fit_point))]\r\n \r\n def converge(self):\r\n for i in range(self.iterates):\r\n values = self.prune_genes()\r\n current_score = values[0]\r\n current_best_gene = values[1]\r\n self.epsilon -= 1/self.iterates\r\n if i % 100 == 0:\r\n print(int(1/current_score), \"miles\")\r\n return current_best_gene, int(1/current_score)"
] | [
[
"numpy.vectorize"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
XuRuihan/YPPF | [
"b6bed5421441563af8e6b5b2d00a31fc9b410e76"
] | [
"app/data_import.py"
] | [
"import pandas as pd\nimport os\n\nBASE_DIR = \"/Users/rickymac/Documents/20Autmun/ypdev/YPPF/boot/boottest/\"\n\n\ndef load():\n # df_2018 = pd.read_csv(BASE_DIR + 'static/2018.csv')\n df_1819 = pd.read_csv(\"app/append.csv\")\n return df_1819\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
rmlarose/rigettiVQLS | [
"2b4b2eeac350780d4393aa8458f92fcc5d06a6fe"
] | [
"data/opt10q/plot_params.py"
] | [
"\"\"\"Plots cost vs iteration data from QPU and simulator.\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# ========\n# Get data\n# ========\n\n# Simulated data\nqvm = np.loadtxt(\"ANGLES_QVM_8q_Thu_Feb_6_15:12:02_2020.txt\")\n\n# Aspen-7-5Q-B QPU data\nqpu1 = np.loadtxt(\"ANGLES_8q_Thu_Feb_6_06:31:39_2020.txt\")\nqpu2 = np.loadtxt(\"ANGLES_8q_Thu_Feb_6_06:46:09_2020.txt\")\nqpu3 = np.loadtxt(\"ANGLES_8q_Thu_Feb_6_06:58:49_2020.txt\")\nqpu4 = np.loadtxt(\"ANGLES_8q_Thu_Feb_6_07:16:13_2020.txt\")\n\nallqpu = [qpu1, qpu2, qpu3, qpu4]\n\n# Aspen-4-5Q-E QPU data\n# aspen5qpu1 = list(np.loadtxt(\"CVALS_5q_Tue_Feb_4_20:12:30_2020_ASPEN_4_5Q_E.txt\"))\n\nprint(qpu1)\n\ncolors = [\"black\", \"blue\", \"green\", \"red\", \"cyan\", \"orange\", \"violet\", \"salmon\"]\n\nplt.rcParams.update({\"font.family\": \"times\", \"font.weight\": \"bold\", \"font.size\": 18})\nplt.figure(figsize=(15, 6))\n\nfor ii in range(qpu1.shape[1]):\n plt.plot(qvm[:, ii], \"--\", color=colors[ii], lw=3, label=r\"$\\theta_{}$\".format(ii + 1))\n\n# for ii in range(qpu1.shape[1]):\n# plt.plot(qpu1[:, ii], \"-.\", color=colors[ii], lw=3)\n\n# ====\n# Plot\n# ====\n# plt.rcParams.update({\"font.family\": \"times\", \"font.weight\": \"bold\", \"font.size\": 18})\n# plt.figure(figsize=(9, 9))\n#\n# # Plot QVM data\n# plt.plot(qvm, color=\"black\", lw=3, label=\"QVM\")\n#\n# # Plot Aspen-7-5Q-B QPU data\n# for (ii, qpu) in enumerate(allqpu):\n# plt.plot(qpu, ls=\"dashdot\", lw=3, label=f\"Aspen-7-8Q-B Run {ii + 1}\")\n#\n# # Plot Aspen-4-5Q-E QPU data\n# # plt.errorbar(xs, aspen5qpu1, yerr=None, ls=\"dashdot\", lw=3, capsize=8, label=\"Aspen-4-5Q-E\")\n#\n# Final touches\nplt.title(\"QVM\")\nplt.legend()\nplt.xlabel(r\"Iteration\")\nplt.ylabel(r\"Angle\")\nplt.grid()\nplt.show()\n"
] | [
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.show",
"numpy.loadtxt",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Tagman/vball | [
"73fdbe0e6a44e6852b5f3bd49d7e316ff40d030f"
] | [
"blobber.py"
] | [
"import math\nfrom typing import Union\n\nimport cv2 as cv\nimport numpy as np\nimport ball_net as bn\nimport sys\n\ncount = 0\n\nR = 60\nEPS = 1e-6\nEPS2 = 0.5\n\nSTATUS_INIT = 0\nSTATUS_STATIC = 1\nSTATUS_DIRECTED = 2\n\n\ndef pt_dist(x1, y1, x2, y2):\n dx = x1 - x2\n dy = y1 - y2\n return math.sqrt(dx * dx + dy * dy)\n\n\nclass Blob:\n count = 1\n\n def __init__(self, x, y, radius, age):\n self.id = Blob.count\n Blob.count += 1\n self.points = [[x, y]]\n self.pp = [[radius, age]]\n self.status = STATUS_INIT\n self.v = None\n self.age = age\n self.nx = None\n self.ny = None\n\n def fit(self, x, y):\n # get the distance from the last added point (x and y) to another Point x and y\n d = pt_dist(self.points[-1][0], self.points[-1][1], x, y)\n return d < R, d\n\n def add(self, x, y, r, a):\n self.points.append([x, y])\n self.pp.append([r, a])\n self.age = a\n if len(self.points) > 2:\n # if self.status == STATUS_DIRECTED and self.nx is not None:\n # print(\"Predict\", self.nx, self.ny, \"vs\", x, y)\n\n dx1 = self.points[-2][0] - self.points[-3][0]\n dy1 = self.points[-2][1] - self.points[-3][1]\n\n dx2 = x - self.points[-2][0]\n dy2 = y - self.points[-2][1]\n\n d1 = pt_dist(self.points[-2][0], self.points[-2][1], x, y)\n d2 = pt_dist(self.points[-2][0], self.points[-2][1], self.points[-3][0], self.points[-3][1])\n if dx1 * dx2 > 0 and dy1 * dy2 > 0 and d1 > 5 and d2 > 5:\n self.status = STATUS_DIRECTED\n # print(\"Directed\", self.pts)\n # self.predict()\n elif self.status != STATUS_DIRECTED:\n self.status = STATUS_STATIC\n\n def predict(self):\n npts = np.array(self.points)\n l = len(self.points) + 1\n idx = np.array(range(1, l))\n\n kx = np.polyfit(idx, npts[:, 0], 1)\n fkx = np.poly1d(kx)\n\n ky = np.polyfit(idx, npts[:, 1], 1)\n fky = np.poly1d(ky)\n\n self.nx = fkx(l)\n self.ny = fky(l)\n return self.nx, self.ny\n\n\nBlobs = []\nball_blob: Union[Blob, None] = None\nprev_ball_blob: Union[Blob, None] = None\n\n\ndef get_ball_blob():\n return ball_blob\n\n\ndef find_closest_existing_blob(center_x, center_y):\n global Blobs, count\n rbp = []\n sbp = []\n\n for blob in Blobs:\n # its fitting if the distance is below 60 (why 60?)\n fit, distance = blob.fit(center_x, center_y)\n if fit:\n # new blob is not longer than 4 blobs away\n if count - blob.age < 4:\n rbp.append([blob, distance])\n elif blob.status == STATUS_STATIC:\n sbp.append([blob, distance])\n\n if len(sbp) + len(rbp) == 0:\n return None\n if len(rbp) > 0:\n # sort by distance\n rbp.sort(key=lambda e: e[1])\n # return blob with the lowest distance\n return rbp[0][0]\n else:\n # sort by distance\n sbp.sort(key=lambda e: e[1])\n return sbp[0][0]\n\n\ndef handle_blob(center_x, center_y, radius):\n global Blobs, count, ball_blob\n blob = find_closest_existing_blob(center_x, center_y)\n if blob is None:\n Blobs.append(Blob(center_x, center_y, radius, count))\n return\n blob.add(center_x, center_y, radius, count)\n if blob.status == STATUS_DIRECTED:\n if not ball_blob:\n ball_blob = blob\n # if the current blob has more data its the new ball blob\n elif len(blob.points) > len(ball_blob.points):\n ball_blob = blob\n\n\ndef begin_gen():\n global ball_blob, prev_ball_blob\n prev_ball_blob = ball_blob\n ball_blob = None\n\n\ndef end_gen():\n global count, ball_blob\n count += 1\n\n\ndef handle_blobs(mask, frame):\n contours, _ = cv.findContours(mask, cv.RETR_CCOMP, cv.CHAIN_APPROX_SIMPLE)\n\n begin_gen()\n for contour in contours:\n rectangle_origin_x, rectangle_origin_y, rectangle_width, rectangle_height = cv.boundingRect(contour)\n 
rectangle_shorter_side = min(rectangle_width, rectangle_height)\n rectangle_longer_side = max(rectangle_width, rectangle_height)\n rectangle_ratio = rectangle_longer_side / rectangle_shorter_side\n # if sides are too large or bounding rectangle of contour is not close to a 1:1 ratio, like for a ball\n # then try the next one\n if rectangle_shorter_side < 10 or rectangle_longer_side > 40 or rectangle_ratio > 1.5:\n continue\n\n # cut the rectangle that is bounding the blob out of the mask\n cut_blob_from_mask = mask[\n rectangle_origin_y: rectangle_origin_y + rectangle_height,\n rectangle_origin_x: rectangle_origin_x + rectangle_width]\n # cv.imshow(\"Cut-Blob\", cut_blob)\n\n is_blob, amount_of_non_zeroes = check_blob(cut_blob_from_mask, rectangle_width, rectangle_height)\n cv.imshow('Mask', mask)\n if not is_blob:\n continue\n probability_non_zeroes = amount_of_non_zeroes / (rectangle_width * rectangle_height)\n # at least half of the pixels should be non-zeroes\n if probability_non_zeroes < 0.5:\n continue\n\n # cut the bounding rectangle from the frame\n cut_frame = frame[\n rectangle_origin_y: rectangle_origin_y + rectangle_height,\n rectangle_origin_x: rectangle_origin_x + rectangle_width]\n # cv.imshow(\"Cut-Frame\", cut_frame)\n\n # why is this done here, whats the benefit?\n # so only the real detected blob is there not the noise from cutting?\n cut_c = cv.bitwise_and(cut_frame, cut_frame, mask=cut_blob_from_mask)\n # cv.imshow(\"Cut-C\", cut_c)\n\n # is the blob a ball? Decided by the NN\n if bn.check_pic(cut_c) != 0:\n continue\n # get data (coordinates) for the enclosing circle of the detected ball\n ((x, y), radius) = cv.minEnclosingCircle(contour)\n # find out if the blob is directed with a previous blob and also add it to blob list\n handle_blob(int(x), int(y), int(radius))\n\n end_gen()\n\n\ndef check_blob(blob, width, height):\n # x and y are always 0\n origin_x = 0\n origin_y = 0\n\n dx = int(width / 5)\n x0 = origin_x + 2 * dx\n vertical_part_of_blob = blob[origin_y: origin_y + height, x0: x0 + dx]\n\n dy = int(height / 5)\n y0 = origin_y + 2 * dy\n # this cuts the 3rd fifth part (horizontal) out from the blob\n horizontal_part_of_blob = blob[y0: y0 + dy, origin_x: origin_x + width]\n\n non_zeroes_in_horizontal_strip = cv.countNonZero(horizontal_part_of_blob)\n non_zeroes_in_vertical_strip = cv.countNonZero(vertical_part_of_blob)\n non_zeroes_in_blob = cv.countNonZero(blob)\n\n lower_count_of_non_zeroes = min(non_zeroes_in_horizontal_strip, non_zeroes_in_vertical_strip)\n upper_count_of_non_zeroes = max(non_zeroes_in_horizontal_strip, non_zeroes_in_vertical_strip)\n\n if lower_count_of_non_zeroes > 0:\n ratio_of_non_zeroes_in_both_strips = upper_count_of_non_zeroes / lower_count_of_non_zeroes\n else:\n ratio_of_non_zeroes_in_both_strips = 1000\n\n ratio_of_non_zeroes_for_horizontal_and_blob = non_zeroes_in_horizontal_strip / non_zeroes_in_blob\n ratio_of_non_zeroes_for_vertical_and_blob = non_zeroes_in_vertical_strip / non_zeroes_in_blob\n\n # what are these ratios? 
why 1.5, 0.15\n return \\\n ratio_of_non_zeroes_in_both_strips < 1.5 and \\\n ratio_of_non_zeroes_for_horizontal_and_blob > 0.15 and \\\n ratio_of_non_zeroes_for_vertical_and_blob > 0.15, non_zeroes_in_blob\n\n\ndef draw_ball(pic):\n ball = get_ball_blob()\n if ball is not None:\n cv.circle(pic, (ball.points[-1][0], ball.points[-1][1]), 10, (0, 200, 0), 3)\n else:\n if prev_ball_blob is not None:\n x, y = prev_ball_blob.predict()\n cv.circle(pic, (int(x), int(y)), 10, (0, 200, 0), 3)\n\n\nfound_points = []\ndef draw_ball_path(pic):\n ball = get_ball_blob()\n # try detection with vectors and their direction (so 4 points)\n if ball is not None:\n # points_iterator = iter(ball.points)\n sub_points_size = 4\n points = ball.points\n for index, point_to_draw in enumerate(points):\n # point_to_draw = ball.points[index]\n next_four_points = ball.points[index:index+sub_points_size]\n # print(f'current point: {point_to_draw}')\n # print(f'next four points {next_four_points}')\n # next_two_points = list(itertools.islice(points_iterator, 2))\n if len(next_four_points) == 4:\n intersection = get_intersect(next_four_points[0], next_four_points[1], next_four_points[2], next_four_points[3])\n y_coordinates = map(lambda point: point[1], next_four_points)\n intersection_y = intersection[1]\n\n if (intersection_y < float('inf')) and all(i <= intersection_y for i in y_coordinates):\n print(f'lowest point found: {intersection}')\n cv.circle(pic, (intersection[0], intersection_y), 3, (0, 0, 255), -1)\n cv.circle(pic, (point_to_draw[0], point_to_draw[1]), 3, (150, 150, 150), -1)\n\n\ndef get_intersect(a1, a2, b1, b2):\n \"\"\"\n Returns the point of intersection of the lines passing through a2,a1 and b2,b1.\n a1: [x, y] a point on the first line\n a2: [x, y] another point on the first line\n b1: [x, y] a point on the second line\n b2: [x, y] another point on the second line\n \"\"\"\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return (float('inf'), float('inf'))\n return (int(x/z), int(y/z))\n\n\ndef draw_blobs(w, h):\n pic = np.zeros((h, w, 3), np.uint8)\n for b in Blobs:\n clr = (200, 200, 200)\n if b.status == STATUS_STATIC:\n clr = (0, 200, 0)\n elif b.status == STATUS_DIRECTED:\n clr = (200, 0, 0)\n if not b.v is None:\n cv.line(pic, (b.points[0][0], b.points[0][1]), (b.points[-1][0], b.points[-1][1]), (255, 0, 0), 1)\n for p in b.points:\n cv.circle(pic, (p[0], p[1]), 3, clr, -1)\n\n draw_ball(pic)\n\n return pic\n\n\ndef test_clip(path):\n vs = cv.VideoCapture(path)\n backSub = cv.createBackgroundSubtractorMOG2()\n n = 0\n while (True):\n ret, frame = vs.read()\n if not ret or frame is None:\n break\n\n h = int(frame.shape[0] / 2)\n w = int(frame.shape[1] / 2)\n\n frame = cv.resize(frame, (w, h))\n mask = backSub.apply(frame)\n\n mask = cv.dilate(mask, None)\n mask = cv.GaussianBlur(mask, (15, 15), 0)\n ret, mask = cv.threshold(mask, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n\n handle_blobs(mask, frame)\n pic = draw_blobs(w, h)\n cv.imshow('frame', pic)\n cv.imwrite(\"frames/frame-{:03d}.jpg\".format(n), pic)\n if cv.waitKey(10) == 27:\n break\n n += 1\n\n\nif __name__ == \"__main__\":\n test_clip(sys.argv[1])\n # test_clip(\"D:/Videos/aus4.avi\")\n"
] | [
[
"numpy.polyfit",
"numpy.poly1d",
"numpy.ones",
"numpy.cross",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
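The get_intersect helper in the record above finds where two lines cross using homogeneous coordinates and cross products; below is a minimal standalone sketch of the same idea on made-up points (the function name and sample points are illustrative assumptions, not taken from the repository).

    import numpy as np

    def line_intersection(a1, a2, b1, b2):
        s = np.vstack([a1, a2, b1, b2]).astype(float)  # stack the four points
        h = np.hstack((s, np.ones((4, 1))))            # lift to homogeneous coordinates
        l1 = np.cross(h[0], h[1])                      # line through a1 and a2
        l2 = np.cross(h[2], h[3])                      # line through b1 and b2
        x, y, z = np.cross(l1, l2)                     # intersection point (homogeneous)
        if z == 0:                                     # z == 0 means the lines are parallel
            return float('inf'), float('inf')
        return float(x / z), float(y / z)

    print(line_intersection((0, 0), (1, 1), (0, 1), (1, 0)))  # (0.5, 0.5)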
ivallesp/AdaIN | [
"f1928a2662a496402e4541bf96bf26a97fde5995"
] | [
"src/vgg.py"
] | [
"import torch\nimport torch.nn as nn\nfrom torchvision.models.utils import load_state_dict_from_url\nfrom typing import Union, List, Dict, Any, cast\n\n\n__all__ = [\n \"VGG\",\n \"vgg11\",\n \"vgg11_bn\",\n \"vgg13\",\n \"vgg13_bn\",\n \"vgg16\",\n \"vgg16_bn\",\n \"vgg19_bn\",\n \"vgg19\",\n]\n\n\nmodel_urls = {\n \"vgg11\": \"https://download.pytorch.org/models/vgg11-8a719046.pth\",\n \"vgg13\": \"https://download.pytorch.org/models/vgg13-19584684.pth\",\n \"vgg16\": \"https://download.pytorch.org/models/vgg16-397923af.pth\",\n \"vgg19\": \"https://download.pytorch.org/models/vgg19-dcbb9e9d.pth\",\n \"vgg11_bn\": \"https://download.pytorch.org/models/vgg11_bn-6002323d.pth\",\n \"vgg13_bn\": \"https://download.pytorch.org/models/vgg13_bn-abd245e5.pth\",\n \"vgg16_bn\": \"https://download.pytorch.org/models/vgg16_bn-6c64b313.pth\",\n \"vgg19_bn\": \"https://download.pytorch.org/models/vgg19_bn-c79401a0.pth\",\n}\n\n\nclass VGG(nn.Module):\n def __init__(\n self, features: nn.Module, num_classes: int = 1000, init_weights: bool = True\n ) -> None:\n super(VGG, self).__init__()\n self.features = features\n self.avgpool = nn.AdaptiveAvgPool2d((7, 7))\n self.classifier = nn.Sequential(\n nn.Linear(512 * 7 * 7, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, 4096),\n nn.ReLU(True),\n nn.Dropout(),\n nn.Linear(4096, num_classes),\n )\n if init_weights:\n self._initialize_weights()\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x = self.features(x)\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.classifier(x)\n return x\n\n def _initialize_weights(self) -> None:\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n if m.bias is not None:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.BatchNorm2d):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, 0, 0.01)\n nn.init.constant_(m.bias, 0)\n\n\ndef make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential:\n layers: List[nn.Module] = []\n in_channels = 3\n for v in cfg:\n if v == \"M\":\n layers += [nn.MaxPool2d(kernel_size=2, stride=2)]\n else:\n v = cast(int, v)\n conv2d = nn.Conv2d(\n in_channels, v, kernel_size=3, padding=1, padding_mode=\"reflect\"\n )\n if batch_norm:\n layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]\n else:\n layers += [conv2d, nn.ReLU(inplace=True)]\n in_channels = v\n return nn.Sequential(*layers)\n\n\ncfgs: Dict[str, List[Union[str, int]]] = {\n \"A\": [64, \"M\", 128, \"M\", 256, 256, \"M\", 512, 512, \"M\", 512, 512, \"M\"],\n \"B\": [64, 64, \"M\", 128, 128, \"M\", 256, 256, \"M\", 512, 512, \"M\", 512, 512, \"M\"],\n \"D\": [\n 64,\n 64,\n \"M\",\n 128,\n 128,\n \"M\",\n 256,\n 256,\n 256,\n \"M\",\n 512,\n 512,\n 512,\n \"M\",\n 512,\n 512,\n 512,\n \"M\",\n ],\n \"E\": [\n 64,\n 64,\n \"M\",\n 128,\n 128,\n \"M\",\n 256,\n 256,\n 256,\n 256,\n \"M\",\n 512,\n 512,\n 512,\n 512,\n \"M\",\n 512,\n 512,\n 512,\n 512,\n \"M\",\n ],\n}\n\n\ndef _vgg(\n arch: str,\n cfg: str,\n batch_norm: bool,\n pretrained: bool,\n progress: bool,\n **kwargs: Any\n) -> VGG:\n if pretrained:\n kwargs[\"init_weights\"] = False\n model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef vgg11(pretrained: bool = False, progress: bool = True, 
**kwargs: Any) -> VGG:\n r\"\"\"VGG 11-layer model (configuration \"A\") from\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg(\"vgg11\", \"A\", False, pretrained, progress, **kwargs)\n\n\ndef vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n r\"\"\"VGG 11-layer model (configuration \"A\") with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg(\"vgg11_bn\", \"A\", True, pretrained, progress, **kwargs)\n\n\ndef vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n r\"\"\"VGG 13-layer model (configuration \"B\")\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg(\"vgg13\", \"B\", False, pretrained, progress, **kwargs)\n\n\ndef vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n r\"\"\"VGG 13-layer model (configuration \"B\") with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg(\"vgg13_bn\", \"B\", True, pretrained, progress, **kwargs)\n\n\ndef vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n r\"\"\"VGG 16-layer model (configuration \"D\")\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg(\"vgg16\", \"D\", False, pretrained, progress, **kwargs)\n\n\ndef vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n r\"\"\"VGG 16-layer model (configuration \"D\") with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg(\"vgg16_bn\", \"D\", True, pretrained, progress, **kwargs)\n\n\ndef vgg19(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n r\"\"\"VGG 19-layer model (configuration \"E\")\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg(\"vgg19\", \"E\", False, pretrained, progress, **kwargs)\n\n\ndef vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG:\n 
r\"\"\"VGG 19-layer model (configuration 'E') with batch normalization\n `\"Very Deep Convolutional Networks For Large-Scale Image Recognition\" <https://arxiv.org/pdf/1409.1556.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _vgg(\"vgg19_bn\", \"E\", True, pretrained, progress, **kwargs)\n"
] | [
[
"torch.nn.Sequential",
"torch.nn.Dropout",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.nn.init.normal_",
"torch.nn.BatchNorm2d",
"torch.flatten",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
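A minimal usage sketch for the vendored VGG in the record above; the import path src.vgg and the dummy input are assumptions, the rest follows the record's API.

    import torch
    from src.vgg import vgg19

    model = vgg19(pretrained=False)          # pretrained=True would fetch the torchvision weights
    model.eval()
    with torch.no_grad():
        x = torch.randn(1, 3, 224, 224)      # dummy RGB batch
        feats = model.features(x)            # convolutional part only, e.g. for style features
    print(feats.shape)                       # torch.Size([1, 512, 7, 7])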
mlz-ictrl/nicos | [
"a6de0bc194ba42e3dc04a033713b41b5499ba8e1"
] | [
"test/nicos_ess/test_devices/test_area_detector.py"
] | [
"# -*- coding: utf-8 -*-\n# *****************************************************************************\n# NICOS, the Networked Instrument Control System of the MLZ\n# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)\n#\n# This program is free software; you can redistribute it and/or modify it under\n# the terms of the GNU General Public License as published by the Free Software\n# Foundation; either version 2 of the License, or (at your option) any later\n# version.\n#\n# This program is distributed in the hope that it will be useful, but WITHOUT\n# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS\n# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more\n# details.\n#\n# You should have received a copy of the GNU General Public License along with\n# this program; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n#\n# Module authors:\n# Michele Brambilla <[email protected]>\n#\n# *****************************************************************************\n\n\"\"\"\nTests for EPICS area detector\n\"\"\"\n\nimport time\nimport unittest\nfrom unittest.mock import patch\n\nimport numpy\nimport pytest\n\nfrom epics import PV\n\nfrom nicos.core import CommunicationError, status\nfrom nicos.core.constants import LIVE\n\nfrom nicos_ess.devices.epics.status import ADKafkaStatus\n\nfrom test.nicos_ess.test_devices.utils import create_hs00\n\npytest.importorskip('kafka')\npytest.importorskip('graypy')\n\n\nsession_setup = \"ess_area_detector\"\n\n\[email protected](reason=\"flaky test\")\nclass TestEpicsAreaDetector:\n \"\"\"\n Tests for the operations EPICS areaDetector\n \"\"\"\n\n detector = None\n time_remaining = None\n time_preset = 1\n PVtime = None\n\n def _wait_for_completion(self, start=time.time(), preset=time_preset):\n while self.detector.doStatus()[0] == status.BUSY and \\\n (time.time() - start) < (preset + 1):\n time.sleep(0.01)\n\n @pytest.fixture()\n def reset_time(self, request):\n def fin():\n self.PVtime.put(0.01)\n time.sleep(.5)\n\n request.addfinalizer(fin)\n\n @pytest.fixture(scope='class')\n def devices(self, session):\n try:\n detector = session.getDevice('areadetector_base')\n time_remaining = session.getDevice('time_remaining')\n except CommunicationError as e:\n pytest.skip('%r: areaDetector not connected' % e)\n yield detector, time_remaining\n\n @pytest.fixture(autouse=True)\n def initialize_devices(self, devices):\n \"\"\"\n Initialize the devices if they are not already initialized. 
If the\n IOC is not running skip all tests\n \"\"\"\n self.detector, self.time_remaining = devices\n self.status = PV(self.detector.errormsgpv)\n if not self.status.connected:\n pytest.skip('PVs not connected')\n\n # set single image mode, else will continuously acquire\n image_mode = PV('13SIM1:cam1:ImageMode')\n image_mode.put('Single')\n self.PVtime = PV('13SIM1:cam1:AcquireTime')\n self.PVtime.put(0.01)\n self.detector.stop()\n\n def test_pv_parameters(self):\n pv_params = self.detector._get_pv_parameters()\n expected_pvlist = ['startpv', 'errormsgpv']\n for _pv in expected_pvlist:\n assert _pv in pv_params\n\n @pytest.mark.skip('Conflicts with some other test, needs debug')\n def test_acquisition_stop(self, reset_time):\n # Force stop acquisition\n self.detector.stop()\n self.detector.wait()\n assert self.detector.doStatus() == (status.OK, 'Idle')\n\n def test_acquisition_start(self, reset_time):\n self.PVtime.put(5)\n self.detector.start()\n assert self.detector.doStatus() == (status.BUSY, 'Acquire')\n\n def test_set_time_preset(self, reset_time):\n \"\"\"\n Test that the time preset is set properly.\n \"\"\"\n self.detector.doSetPreset(t=self.time_preset)\n time.sleep(0.5)\n assert self.PVtime.get() == self.time_preset\n\n def test_acquisition_time(self, reset_time):\n \"\"\"\n Test that the acquisition time corresponds to the time preset\n \"\"\"\n self.detector.doSetPreset(t=self.time_preset)\n time.sleep(0.5)\n start = time.time()\n self.detector.start()\n self._wait_for_completion(start)\n assert self.time_preset <= (\n time.time() - start) <= self.time_preset + 0.1\n\n @pytest.mark.skip(reason=\"Doesn't work in ADSimDetector\")\n def test_remaining_time(self, reset_time):\n pv = PV('13SIM1:cam1:TimeRemaining_RBV')\n self.detector.doSetPreset(t=self.time_preset)\n start = time.time()\n self.detector.start()\n time.sleep(.1 * self.time_preset)\n elapsed = start - time.time()\n assert abs(pv.get() - (self.time_preset - elapsed)) < .1\n\n\nclass TestKafkaPlugin:\n \"\"\"\n Tests for the operations of KafkaPlugin\n \"\"\"\n\n broker = 'ess01.psi.ch:9092'\n topic = 'sim_data_topic'\n detector = None\n log = None\n PVbroker = None\n PVtopic = None\n PVmessage = None\n PVstatus = None\n\n @pytest.fixture(scope='class')\n def devices(self, session):\n try:\n detector = session.getDevice('kafka_plugin')\n except CommunicationError as e:\n pytest.skip('%r: ADPluginKafka not connected' % e)\n yield detector\n\n @pytest.fixture(autouse=True)\n def initialize_devices(self, devices):\n self.detector = devices\n self.log = self.detector.log\n self.PVbroker = PV(self.detector.brokerpv[:-4])\n self.PVtopic = PV(self.detector.topicpv[:-4])\n pv = PV(self.detector.msgpv[:-4])\n if pv.connected:\n self.PVmessage = pv\n pv = PV(self.detector.statuspv[:-4])\n if pv.connected:\n self.PVstatus = pv\n\n @pytest.fixture(autouse=True)\n def restore_pvs(self, request):\n def fin():\n self.log.warning('Restore original values')\n self.PVbroker.put(self.broker)\n self.PVtopic.put(self.topic)\n if self.PVstatus:\n self.PVstatus.put(ADKafkaStatus.CONNECTED)\n if self.PVmessage:\n self.PVmessage.put('')\n\n request.addfinalizer(fin)\n\n def test_pv_parameters(self):\n params = [key for key in self.detector.parameters.keys() if\n key[-2:] == 'pv' and key != 'kafkapv']\n params = list(set(self.detector.kafka_plugin_fields.keys()).union(\n params))\n assert sorted(params) == sorted(self.detector._get_pv_parameters())\n\n def test_no_duplication_in_pv_parameters(self):\n names = self.detector._get_pv_parameters()\n 
assert sorted(names) == sorted(list(set(names)))\n\n def test_broker(self):\n assert self.detector.broker == self.broker\n self.PVbroker.put('a_different_broker.psi.ch:9092')\n time.sleep(.5)\n assert self.detector.broker == 'a_different_broker.psi.ch:9092'\n\n def test_topic(self):\n assert self.detector.topic == self.topic\n self.PVtopic.put('a_different_topic')\n time.sleep(.5)\n assert self.detector.topic == 'a_different_topic'\n\n def test_status_on_success(self):\n st = self.detector.doStatus()\n assert st[0] == status.OK\n\n def test_status_on_empty_broker(self):\n self.PVbroker.put('')\n st = self.detector.doStatus()\n assert st[0] == status.ERROR\n\n def test_status_on_empty_topic(self):\n self.PVtopic.put('')\n st = self.detector.doStatus()\n assert st[0] == status.ERROR\n\n def test_status_on_connecting(self):\n if not self.PVstatus:\n pytest.skip('Can\\'t change PV status')\n self.PVstatus.put(ADKafkaStatus.CONNECTING)\n time.sleep(.5)\n assert self.detector.doStatus()[0] == status.WARN\n\n def test_status_on_disconnected(self):\n if not self.PVstatus:\n pytest.skip('Can\\'t change PV status')\n self.PVstatus.put(ADKafkaStatus.DISCONNECTED)\n time.sleep(.5)\n st = self.detector.doStatus()\n assert st[0] == status.ERROR\n\n def test_status_on_error(self):\n if not self.PVstatus:\n pytest.skip('Can\\'t change PV status')\n self.PVstatus.put(ADKafkaStatus.ERROR)\n time.sleep(.5)\n st = self.detector.doStatus()\n assert st[0] == status.ERROR\n\n def test_status_on_message(self):\n if not self.PVmessage or not self.PVmessage.connected:\n pytest.skip('Can\\'t change PV status')\n msg = 'Any Kafka status message'\n self.PVmessage.put(msg)\n st = self.detector.doStatus()\n assert st[1] == msg\n\n\n\n\nclass TestKafkaAreaDetectorConsumer:\n \"\"\"\n Test operation of areaDetector messages consumer.\n Interaction with Kafka Plugin and Flatbuffers hs00 deserializer is required\n \"\"\"\n broker = 'ess01.psi.ch:9092'\n topic = 'sim_data_topic'\n detector = None\n plugin = None\n warning = None\n PVbroker = None\n PVtopic = None\n PVmessage = None\n PVstatus = None\n\n @pytest.fixture(scope='class')\n def devices(self, session):\n try:\n detector = session.getDevice('kafka_image_channel')\n except CommunicationError as e:\n pytest.skip('%r: ADPluginKafka not connected' % e)\n yield detector\n\n @pytest.fixture(autouse=True)\n def initialize_devices(self, devices):\n self.detector = devices\n self.plugin = self.detector._attached_kafka_plugin\n self.warning = self.detector.log.warning\n self.PVbroker = PV(self.plugin.brokerpv[:-4])\n self.PVtopic = PV(self.plugin.topicpv[:-4])\n pv = PV(self.plugin.msgpv[:-4])\n if pv.connected:\n self.PVmessage = pv\n pv = PV(self.plugin.statuspv[:-4])\n if pv.connected:\n self.PVstatus = pv\n\n @pytest.fixture(autouse=True)\n def restore_pvs(self, request):\n def fin():\n self.warning('Restore original values')\n self.PVbroker.put(self.broker)\n self.PVtopic.put(self.topic)\n if self.PVstatus:\n self.PVstatus.put(ADKafkaStatus.CONNECTED)\n if self.PVmessage:\n self.PVmessage.put('')\n\n request.addfinalizer(fin)\n\n def test_last_message(self):\n \"\"\"\n Test that _last_message is actually the message with the larger\n timestamp\n \"\"\"\n messages = [(1234, list(range(10))),\n (5678, list(range(8, 20))),\n (9012, list(range(5, 35, 7)))\n ]\n self.detector.new_messages_callback(messages)\n assert self.detector._lastmessage == (9012, list(range(5, 35, 7)))\n\n def test_broker_and_topic_are_valid(self):\n assert self.plugin.broker and self.plugin.topic\n\n 
def test_status_on_empty_broker(self):\n self.PVbroker.put('')\n st = self.detector.doStatus()\n assert st == (status.ERROR, 'Empty broker')\n\n def test_status_on_empty_topic(self):\n self.PVtopic.put('')\n st = self.detector.doStatus()\n assert st == (status.ERROR, 'Empty topic')\n\n def test_consumer_is_valid(self):\n assert self.detector._consumer\n\n def test_consumer_is_subscribed(self):\n assert self.detector._consumer.subscription()\n\n @pytest.mark.skip\n def test_consumer_failure(self):\n # TODO\n st = self.detector.doStatus()\n assert st == (status.ERROR, 'Broker failure')\n\n @pytest.mark.skip\n def test_consumer_subscription_failure(self):\n # TODO\n st = self.detector.doStatus()\n assert st == (status.WARN, 'No topic subscribed')\n\n def test_consume_serialized_messages(self):\n \"\"\"\n Creates a set of (timestamp, flatbuffer image array) and feed them\n into the detector. Test that `doReadArray` returns the last image,\n unbuffered.\n \"\"\"\n raw = {}\n timestamps = numpy.random.randint(1e9, high=8e9, size=10)\n for ts in timestamps:\n raw[ts] = numpy.random.randint(1, high=100, size=[10, ],\n dtype='uint32')\n messages = []\n for ts, data in raw.items():\n messages.append((ts, create_hs00(data=numpy.array(data),\n timestamp=ts,\n source='test_histo')))\n self.detector.new_messages_callback(messages)\n data = self.detector.doReadArray(None)\n assert (raw[max(timestamps)] == data).all()\n\n\nclass TestEpicsAreaDetectorWithKafkaPlugin:\n \"\"\"\n Tests for the operations of EPICS areaDetector with configured PluginKafka.\n In practice, make sure that information propagates correctly from\n attached devices down to areaDetector\n \"\"\"\n\n time_preset = 1\n broker = 'ess01.psi.ch:9092'\n topic = 'sim_data_topic'\n detector = None\n plugin = None\n image_channel = None\n PVtime = None\n PVbroker = None\n PVtopic = None\n PVmessage = None\n PVstatus = None\n\n def _wait_for_completion(self, start=time.time(), preset=time_preset):\n while self.detector.doStatus()[0] == status.BUSY and \\\n (time.time() - start) < (preset + 1):\n time.sleep(0.01)\n\n @pytest.fixture(autouse=True)\n def restore_pvs(self, request):\n def fin():\n self.PVbroker.put(self.broker)\n self.PVtopic.put(self.topic)\n if self.PVstatus:\n self.PVstatus.put(ADKafkaStatus.CONNECTED)\n if self.PVmessage:\n self.PVmessage.put('')\n\n request.addfinalizer(fin)\n\n @pytest.fixture()\n def reset_time(self, request):\n def fin():\n self.PVtime.put(0.01)\n time.sleep(.5)\n\n request.addfinalizer(fin)\n\n @pytest.fixture(scope='class')\n def devices(self, session):\n try:\n detector = session.getDevice('areadetector_kafka')\n plugin = session.getDevice('kafka_plugin')\n image_channel = session.getDevice('kafka_image_channel')\n except CommunicationError as e:\n pytest.skip('%r: ADPluginKafka not connected' % e)\n yield detector, plugin, image_channel\n\n @pytest.fixture(autouse=True)\n def initialize_devices(self, devices):\n \"\"\"\n Initialize the devices if they are not already initialized. 
If the\n IOC is not running skip all tests\n \"\"\"\n # set single image mode, else will continuously acquire\n self.detector, self.plugin, self.image_channel = devices\n\n self.status = PV(self.detector.errormsgpv)\n if not self.status.connected:\n pytest.skip('PVs not connected')\n image_mode = PV('13SIM1:cam1:ImageMode')\n image_mode.put('Single')\n self.PVtime = PV('13SIM1:cam1:AcquireTime')\n self.PVtime.put(0.01)\n self.detector.stop()\n self.PVbroker = PV(self.plugin.brokerpv[:-4])\n self.PVtopic = PV(self.plugin.topicpv[:-4])\n pv = PV(self.plugin.msgpv[:-4])\n pv.get()\n if pv.connected:\n self.PVmessage = pv\n pv = PV(self.plugin.statuspv[:-4])\n pv.get()\n if pv.connected:\n self.PVstatus = pv\n self.log = self.detector.log\n\n def test_status_on_empty_broker(self):\n self.PVbroker.put('')\n assert self.detector.doStatus() == (status.ERROR, 'Empty broker')\n\n def test_status_on_empty_topic(self):\n self.PVtopic.put('')\n assert self.detector.doStatus() == (status.ERROR, 'Empty topic')\n\n def test_status_on_connecting(self, restore_pvs):\n if not self.PVstatus:\n pytest.skip('Can\\'t change PV status')\n self.PVstatus.put(ADKafkaStatus.CONNECTING)\n time.sleep(.5)\n assert self.detector.doStatus() == (status.WARN, 'Connecting')\n\n def test_status_on_disconnected(self):\n if not self.PVstatus:\n pytest.skip('Can\\'t change PV status')\n self.PVstatus.put(ADKafkaStatus.DISCONNECTED)\n self.PVmessage.put('A meaningful disconnected error message')\n time.sleep(.5)\n assert self.detector.doStatus() == (\n status.ERROR, 'A meaningful disconnected error message')\n\n def test_status_on_error(self):\n if not self.PVstatus:\n pytest.skip('Can\\'t change PV status')\n self.PVstatus.put(ADKafkaStatus.ERROR)\n self.PVmessage.put('A meaningful error message')\n time.sleep(.5)\n st = self.detector.doStatus()\n self.log.warning(st)\n assert st == (status.ERROR, 'A meaningful error message')\n\n def test_consume_serialized_messages(self):\n \"\"\"\n Creates a set of (timestamp, flatbuffer image array) and feed them\n into the detector. Test that `doReadArray` returns the last image,\n unbuffered.\n \"\"\"\n raw = {}\n timestamps = numpy.random.randint(1e9, high=8e9, size=10)\n for ts in timestamps:\n raw[ts] = numpy.random.randint(1, high=100, size=[5, 5, ],\n dtype='uint32')\n messages = []\n for ts, data in raw.items():\n messages.append((ts, create_hs00(data=numpy.array(data),\n timestamp=ts,\n source='test_histo')))\n self.image_channel.new_messages_callback(messages)\n assert (raw[max(timestamps)] == self.detector.readArrays(LIVE)).all()\n\n\[email protected](reason=\"flaky test\")\nclass TestEpicsAreaDetector1(unittest.TestCase):\n\n def create_patch(self, name):\n patcher = patch(name)\n thing = patcher.start()\n self.addCleanup(patcher.stop)\n return thing\n\n @pytest.fixture(autouse=True)\n def initialize_devices(self, session):\n self.session = session\n self.mock = self.create_patch('epics.pv.PV')\n self.detector = session.getDevice('areadetector_base')\n self.adkafka_plugin = session.getDevice('kafka_plugin')\n\n def test_record_fields(self):\n assert hasattr(self.detector, '_record_fields')\n assert hasattr(self.adkafka_plugin, '_record_fields')\n"
] | [
[
"numpy.array",
"numpy.random.randint"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
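test_last_message in the record above checks that only the newest (largest-timestamp) Kafka message is kept; a small standalone sketch of that selection rule in plain Python (this is not the NICOS device code itself).

    messages = [(1234, list(range(10))),
                (5678, list(range(8, 20))),
                (9012, list(range(5, 35, 7)))]

    last_message = max(messages, key=lambda m: m[0])   # keep the entry with the largest timestamp
    assert last_message == (9012, [5, 12, 19, 26, 33])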
xumeng723/nni | [
"141f24d42d2e86ace3774d931bfab58dca0ef1ad"
] | [
"src/sdk/pynni/nni/metis_tuner/lib_acquisition_function.py"
] | [
"# Copyright (c) Microsoft Corporation\n# All rights reserved.\n#\n# MIT License\n#\n# Permission is hereby granted, free of charge,\n# to any person obtaining a copy of this software and associated\n# documentation files (the \"Software\"), to deal in the Software without restriction,\n# including without limitation the rights to use, copy, modify, merge, publish,\n# distribute, sublicense, and/or sell copies of the Software, and\n# to permit persons to whom the Software is furnished to do so, subject to the following conditions:\n# The above copyright notice and this permission notice shall be included\n# in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING\n# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,\n# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.\n\nimport sys\nimport numpy\n\nfrom scipy.stats import norm\nfrom scipy.optimize import minimize\n\nimport nni.metis_tuner.lib_data as lib_data\n\n\ndef next_hyperparameter_expected_improvement(fun_prediction, \n fun_prediction_args,\n x_bounds, x_types,\n samples_y_aggregation,\n minimize_starting_points,\n minimize_constraints_fun=None):\n '''\n \"Expected Improvement\" acquisition function\n '''\n best_x = None\n best_acquisition_value = None\n x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]\n x_bounds_minmax = numpy.array(x_bounds_minmax)\n\n for starting_point in numpy.array(minimize_starting_points):\n res = minimize(fun=_expected_improvement,\n x0=starting_point.reshape(1, -1),\n bounds=x_bounds_minmax,\n method=\"L-BFGS-B\",\n args=(fun_prediction,\n fun_prediction_args,\n x_bounds,\n x_types,\n samples_y_aggregation,\n minimize_constraints_fun))\n\n if (best_acquisition_value is None) or \\\n (res.fun < best_acquisition_value):\n res.x = numpy.ndarray.tolist(res.x)\n res.x = lib_data.match_val_type(res.x, x_bounds, x_types)\n if (minimize_constraints_fun is None) or \\\n (minimize_constraints_fun(res.x) is True):\n best_acquisition_value = res.fun\n best_x = res.x\n\n outputs = None\n if best_x is not None:\n mu, sigma = fun_prediction(best_x, *fun_prediction_args)\n outputs = {'hyperparameter': best_x, 'expected_mu': mu,\n 'expected_sigma': sigma, 'acquisition_func': \"ei\"}\n\n return outputs\n\ndef _expected_improvement(x, fun_prediction, fun_prediction_args,\n x_bounds, x_types, samples_y_aggregation,\n minimize_constraints_fun):\n # This is only for step-wise optimization\n x = lib_data.match_val_type(x, x_bounds, x_types)\n\n expected_improvement = sys.maxsize\n if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True):\n mu, sigma = fun_prediction(x, *fun_prediction_args)\n\n loss_optimum = min(samples_y_aggregation)\n scaling_factor = -1\n\n # In case sigma equals zero\n with numpy.errstate(divide=\"ignore\"):\n Z = scaling_factor * (mu - loss_optimum) / sigma\n expected_improvement = scaling_factor * (mu - loss_optimum) * \\\n norm.cdf(Z) + sigma * norm.pdf(Z)\n expected_improvement = 0.0 if sigma == 0.0 else expected_improvement\n\n # We want expected_improvement to be as large as possible\n # (i.e., as small as possible for minimize(...))\n expected_improvement = -1 * expected_improvement\n return 
expected_improvement\n\n\ndef next_hyperparameter_lowest_confidence(fun_prediction,\n fun_prediction_args,\n x_bounds, x_types,\n minimize_starting_points,\n minimize_constraints_fun=None):\n '''\n \"Lowest Confidence\" acquisition function\n '''\n best_x = None\n best_acquisition_value = None\n x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]\n x_bounds_minmax = numpy.array(x_bounds_minmax)\n\n for starting_point in numpy.array(minimize_starting_points):\n res = minimize(fun=_lowest_confidence,\n x0=starting_point.reshape(1, -1),\n bounds=x_bounds_minmax,\n method=\"L-BFGS-B\",\n args=(fun_prediction,\n fun_prediction_args,\n x_bounds,\n x_types,\n minimize_constraints_fun))\n\n if (best_acquisition_value) is None or (res.fun < best_acquisition_value):\n res.x = numpy.ndarray.tolist(res.x)\n res.x = lib_data.match_val_type(res.x, x_bounds, x_types)\n if (minimize_constraints_fun is None) or (minimize_constraints_fun(res.x) is True):\n best_acquisition_value = res.fun\n best_x = res.x\n\n outputs = None\n if best_x is not None:\n mu, sigma = fun_prediction(best_x, *fun_prediction_args)\n outputs = {'hyperparameter': best_x, 'expected_mu': mu,\n 'expected_sigma': sigma, 'acquisition_func': \"lc\"}\n return outputs\n\ndef _lowest_confidence(x, fun_prediction, fun_prediction_args,\n x_bounds, x_types, minimize_constraints_fun):\n # This is only for step-wise optimization\n x = lib_data.match_val_type(x, x_bounds, x_types)\n\n ci = sys.maxsize\n if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True):\n mu, sigma = fun_prediction(x, *fun_prediction_args)\n ci = (sigma * 1.96 * 2) / mu\n # We want ci to be as large as possible\n # (i.e., as small as possible for minimize(...),\n # because this would mean lowest confidence\n ci = -1 * ci\n\n return ci\n\n\ndef next_hyperparameter_lowest_mu(fun_prediction,\n fun_prediction_args,\n x_bounds, x_types,\n minimize_starting_points,\n minimize_constraints_fun=None):\n '''\n \"Lowest Mu\" acquisition function\n '''\n best_x = None\n best_acquisition_value = None\n x_bounds_minmax = [[i[0], i[-1]] for i in x_bounds]\n x_bounds_minmax = numpy.array(x_bounds_minmax)\n\n for starting_point in numpy.array(minimize_starting_points):\n res = minimize(fun=_lowest_mu,\n x0=starting_point.reshape(1, -1),\n bounds=x_bounds_minmax,\n method=\"L-BFGS-B\",\n args=(fun_prediction, fun_prediction_args, \\\n x_bounds, x_types, minimize_constraints_fun))\n\n if (best_acquisition_value is None) or (res.fun < best_acquisition_value):\n res.x = numpy.ndarray.tolist(res.x)\n res.x = lib_data.match_val_type(res.x, x_bounds, x_types)\n if (minimize_constraints_fun is None) or (minimize_constraints_fun(res.x) is True):\n best_acquisition_value = res.fun\n best_x = res.x\n\n outputs = None\n if best_x is not None:\n mu, sigma = fun_prediction(best_x, *fun_prediction_args)\n outputs = {'hyperparameter': best_x, 'expected_mu': mu,\n 'expected_sigma': sigma, 'acquisition_func': \"lm\"}\n return outputs\n\n\ndef _lowest_mu(x, fun_prediction, fun_prediction_args,\n x_bounds, x_types, minimize_constraints_fun):\n '''\n Calculate the lowest mu\n '''\n # This is only for step-wise optimization\n x = lib_data.match_val_type(x, x_bounds, x_types)\n\n mu = sys.maxsize\n if (minimize_constraints_fun is None) or (minimize_constraints_fun(x) is True):\n mu, _ = fun_prediction(x, *fun_prediction_args)\n return mu\n "
] | [
[
"scipy.stats.norm.cdf",
"scipy.stats.norm.pdf",
"numpy.errstate",
"numpy.ndarray.tolist",
"numpy.array"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
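A numeric sketch of the Expected Improvement expression evaluated inside _expected_improvement above; mu, sigma and the incumbent loss are invented numbers for illustration.

    from scipy.stats import norm

    mu, sigma = 0.42, 0.10        # surrogate prediction at a candidate point
    loss_optimum = 0.45           # best (lowest) loss observed so far
    scaling_factor = -1           # minimisation convention used in the tuner

    Z = scaling_factor * (mu - loss_optimum) / sigma
    ei = scaling_factor * (mu - loss_optimum) * norm.cdf(Z) + sigma * norm.pdf(Z)
    print(ei)                     # ~0.057; a larger EI marks a more promising candidate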
Philip-Tremblay/Facial_Keypoints | [
"83ef100f6a08be70e50aabb4f50799a49ea78651"
] | [
"models_4.py"
] | [
"## TODO: define the convolutional neural network architecture\n\nimport torch\nfrom torch.autograd import Variable\nimport torch.nn as nn\nimport torch.nn.functional as F\n# can use the below import should you choose to initialize the weights of your Net\nimport torch.nn.init as I\n\n\nclass Net(nn.Module):\n\n def __init__(self):\n super(Net, self).__init__() \n \n t = 16 # to manually scale the network\n \n # input tensor: (1, 224, 224)\n self.conv1 = nn.Conv2d(1, t, 5)\n # pooling\n # output tensor: (t, 110, 110)\n #self.conv1_bn = nn.BatchNorm2d(t)\n \n self.conv2 = nn.Conv2d(t, 2*t, 5)\n # pooling\n # output tensor: (6, 54, 54)\n #self.conv2_bn = nn.BatchNorm2d(2*t)\n \n self.conv3 = nn.Conv2d(2*t, 4*t, 3)\n # pooling\n # output tensor: (12, 25, 25)\n #self.conv3_bn = nn.BatchNorm2d(3*t)\n \n self.conv4 = nn.Conv2d(4*t, 8*t, 2)\n # pooling\n # output tensor: (8t, 12, 12) \n #self.conv4_bn = nn.BatchNorm2d(8*t)\n \n self.pool = nn.MaxPool2d(2, 2)\n \n cv_size=t*12*12\n \n self.fc1 = nn.Linear(8*cv_size, 4*cv_size)\n self.fc2 = nn.Linear(4*cv_size, 2*cv_size)\n self.fc3 = nn.Linear(2*cv_size, 136)\n \n \n # dropout with p=0.4\n self.dropout = nn.Dropout(p=0.4)\n\n \n def forward(self, x):\n ## TODO: Define the feedforward behavior of this model\n ## x is the input image and, as an example, here you may choose to include a pool/conv step:\n \n '''\n x = self.pool(F.relu(self.conv1_bn(self.conv1(x))))\n x = self.pool(F.relu(self.conv2_bn(self.conv2(x))))\n x = self.pool(F.relu(self.conv3_bn(self.conv3(x))))\n x = self.pool(F.relu(self.conv4_bn(self.conv4(x))))\n '''\n x = self.pool(F.relu(self.conv1(x)))\n x = self.pool(F.relu(self.conv2(x)))\n x = self.pool(F.relu(self.conv3(x)))\n x = self.pool(F.relu(self.conv4(x)))\n \n \n # flatten\n x = x.view(x.size(0), -1)\n \n x = self.dropout(F.relu(self.fc1(x)))\n x = self.dropout(F.relu(self.fc2(x)))\n x = self.fc3(x)\n \n\n \n # a modified x, having gone through all the layers of your model, should be returned\n return x\n"
] | [
[
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.Dropout",
"torch.nn.Conv2d"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
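A usage sketch that pushes a dummy grayscale crop through the Net above to confirm the layer bookkeeping; the import path models_4 is an assumption taken from the record's file name.

    import torch
    from models_4 import Net

    net = Net()
    net.eval()
    with torch.no_grad():
        x = torch.randn(1, 1, 224, 224)   # one 224x224 grayscale face crop
        out = net(x)
    print(out.shape)                      # torch.Size([1, 136]) -> 68 (x, y) keypoint pairs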
carpentries-incubator/ml4bio-workshop | [
"29fe78b821aa9f411ca9bf7ad5c5e10b9ae2dee0"
] | [
"data/simulated_t_cell/code/sim_tcell_2.py"
] | [
"\"\"\"\nBinary classification:\nCircular pattern\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import make_circles\nfrom matplotlib import pyplot as plt\n\ndata = make_circles(n_samples=100, noise=0.2, factor=0.5, random_state=1)\nX, y = data\nX = pd.DataFrame(X, columns=['cell_size','total_intensity'])\ny = pd.DataFrame(y, columns=['class'])\ndata = pd.concat([X, y], axis=1)\ndata['class'] = data['class'].replace(0,'quiescent')\ndata['class'] = data['class'].replace(1,'activated')\ndata.to_csv('../simulated_t_cells_2.csv', sep=',', index=False)\n\nplt.figure(figsize=(4,4), dpi=100)\nplt.scatter(X['cell_size'], X['total_intensity'], c=y['class'], cmap=plt.cm.brg)\nplt.axis('off')\nplt.savefig('../images/simulated_t_cells_2.png', transparent=True)\n#plt.show()\n\n"
] | [
[
"pandas.concat",
"matplotlib.pyplot.scatter",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"sklearn.datasets.make_circles",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.figure"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
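The record above relabels the make_circles classes before saving the CSV; a tiny standalone sketch of the same replace step on an assumed toy frame.

    import pandas as pd

    y = pd.DataFrame({'class': [0, 1, 1, 0]})
    y['class'] = y['class'].replace({0: 'quiescent', 1: 'activated'})
    print(y['class'].tolist())   # ['quiescent', 'activated', 'activated', 'quiescent']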
seanpars98/HTResearch | [
"026c115fa20cbf3b3f653e8e55d7fd837ec3a345"
] | [
"Beginings_of_model.py"
] | [
"#import data processing module\nimport pandas as pd\n#import ploting and visualization module\nimport matplotlib.pyplot as plt\n#need regular expressions for parsing\nimport re\n#easy counter for lists\nfrom collections import Counter\n\n#read the data into a Pandas dataframe for easy manipulation from the CSV file\ndata = pd.read_csv('Trafficking_Data.csv')\n\n#print the first 5 rows of the dataframe/file\n#print(data.head())\n\n#clone the different columns into manipulatable lists\nids = data['id'].tolist()\ntitles = data['title'].tolist()\npub_dates = data['pub_date'].tolist()\ncontent = data['body'].tolist()\nurls = data['url'].tolist()\nages = data['poster_age'].tolist()\ndate_collected = data['date_collected'].tolist()\n\n#test loop to print titles then content in more organized rows\n#for i, j in zip(titles, content):\n\t#print('Title: \\n', i, '\\nBody: \\n', j, '\\n----------------------------------------')\n\n\n#############################Phone Number Extraction###############################\n\n#method that finds a phone number within title or description of ad\ndef find_phone(text):\n\n\t#regex testing a variety of phone number formats\n\n\t#000-000-0000\n\t#000 000 0000\n\t#000.000.0000\n\n\t#(000)000-0000\n\t#(000)000 0000\n\t#(000)000.0000\n\t#(000) 000-0000\n\t#(000) 000 0000\n\t#(000) 000.0000\n\n\t#000-0000\n\t#000 0000\n\t#000.0000\n\n\t#0000000\n\t#0000000000\n\t#(000)0000000\n\n\treg = re.compile(r'(\\d{3}[-\\.\\s]??\\d{3}[-\\.\\s]??\\d{4}|\\(\\d{3}\\)\\s*\\d{3}[-\\.\\s]??\\d{4}|\\d{3}[-\\.\\s]??\\d{4})')\n\n\t#results found stored in results\n\tresults = reg.findall(text)\n\n\t#return the matches that we found\n\treturn results\n\n#initialize the results list\ntrue_results = []\nclean_results = []\ndups_results = []\n\n#iterate through the titles and call the method which finds a phone number\n#for i in titles:\n\t#true_results += find_phone(text=i)\n\n#iterate through the descriptions and call the method which finds the phone number\nfor i in content:\n\ttrue_results += find_phone(text=str(i))\n\n#print the results on whether a number was found\nprint(true_results)\nprint('\\n', len(true_results))\n\n#sift through and return the numbers that repeat more than once\ndups_results = [k for k, v in Counter(true_results).items() if v > 1]\n\n#prints the duplicate numbers and the count of dups\nprint(dups_results)\nprint(len(dups_results))\n\n###################################################################################\n\n\n\n\n\n########################## Basic Word Pattern recognition ##########################\n\n\n\n\n\n#data['poster_age'].hist(bins=10)\n\n#plt.show()\n\n\n\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
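The same phone-number regex from the record above, exercised on a made-up sentence so the three accepted formats are visible; the sample text is an assumption.

    import re
    from collections import Counter

    reg = re.compile(r'(\d{3}[-\.\s]??\d{3}[-\.\s]??\d{4}|\(\d{3}\)\s*\d{3}[-\.\s]??\d{4}|\d{3}[-\.\s]??\d{4})')
    text = "Call 555-123-4567 or (555) 123-4567, backup 555-4567."
    matches = reg.findall(text)
    print(matches)        # ['555-123-4567', '(555) 123-4567', '555-4567']
    dups = [k for k, v in Counter(matches).items() if v > 1]
    print(dups)           # [] - no number repeats in this sample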
Sebastian-Whitehead/Medialogi-P3-02 | [
"8fb144c17a10417aa2f5a01fcbc71b4d562d4d27"
] | [
"softwareProgram/connectedComponentsMethod.py"
] | [
"import frameUI, ColorMask, cv2\nfrom BlobTracking import BlobTracking\nimport numpy as np\n\n\n# Blob class which keeps track of each blobs position and dimensions\nclass Blob:\n # Construct getting position and dimension of blob\n def __init__(self, x: int, y: int, w: int, h: int):\n self.x, self.y = x, y # Initialize blobs position\n self.w, self.h = w, h # Initialize blobs dimensions\n\n\n# Find green objects in an image\n# Using OpenCV ConnectedComponentMethod and LAB image color space\nclass ConnectedComponentMethod:\n # Constructer initlizing a BlobTracking object with the arguments (total squat and sets)\n def __init__(self, squatTotal, setTotal):\n self.blobTracking = BlobTracking( # Intializing a BlobTracking object\n squatTotal=squatTotal, # Intilize total squat target\n setTotal=setTotal # Intilize total set target\n )\n\n # Find green objects in image\n def run(self, originalImage: np.ndarray) -> np.ndarray:\n processedImage = ColorMask.colorMaskLAB(originalImage) # Mask out green colors in image\n blobs = findBlobs(processedImage) # Find blobs in the binary masked image\n blobs = mergeBlobs(blobs, 5) # Merge blobs by overlay\n\n # Filter blobs being still too small at given threshold\n filteredBlobs = [] # Initiliz empty array for filtered blobs\n for blob in blobs: # Loop all blobs\n if blob.w > 5 or blob.h > 5: filteredBlobs.append(blob) # Append accepted blobs\n blobs = filteredBlobs # Assign filtered blobs to \"blobs\"\n\n # Draw a red line over each blob found\n for blob in blobs: # Loop all blobs\n frameUI.drawTrackingLine(originalImage, blob.x, blob.y, blob.w) # Draw tracking line (frameUI)\n\n self.blobTracking.run(blobs, originalImage) # Run blobTracking with blobs and the original image\n\n # cv2.imshow('ConnectedComponents', originalImage)\n\n return originalImage # Return original image for visualization\n\n\n# Find blobs in the binary masked image\ndef findBlobs(processedImage: np.ndarray) -> [Blob]:\n blobs = [] # Declare empty array for containing blobs\n num_labels, labels = cv2.connectedComponents(processedImage) # Label image using grass-fire\n # \"num_labels\" is the amount of labels in the given image\n # \"labels\" is the labeled image as a 2D-array with each object having individual labels\n\n # Find position and dimension of all objects in labeled image\n for n in range(1, num_labels): # Loop given all labels except 0\n # Get location of each value containing the specific label (n)\n nonZeroX, nonZeroY = np.where(labels[:, :] == n) # \"nonZero\" is the location in the x- or y-axis\n\n # Get the position and dimension of given object\n x, y = min(nonZeroY), min(nonZeroX) # Get minimal location of x- and y-axis\n w, h = max(nonZeroY) - x, max(nonZeroX) - y # Get maximal location of x- and y-axis\n\n if 1 < w and 1 < h: blobs.append(Blob(x, y, w, h)) # Filter small blobs at given threshold\n\n return blobs # Return array of all the blobs founds in image\n\n\n# Check if two rectangles are overlapping with additional threshold\ndef checkOverLap(obj1, obj2, threshold: int) -> bool:\n if obj1.x - threshold < obj2.x + obj2.w + threshold: # left1 vs right2\n if obj1.x + obj1.w + threshold > obj2.x - threshold: # right1 vs left2\n if obj1.y - threshold < obj2.y + obj2.h + threshold: # top1 vs bottom2\n if obj1.h + obj1.y + threshold > obj2.y - threshold: # bottom1 vs top2\n return True # Return true if the rect. overlap\n return False # Return false if the rect. 
do not overlap\n\n\n# Merge two rectangles (blobs) into one if they overlap\ndef mergeBlobs(blobs, threshold: int) -> [Blob]:\n blobs = list(set(blobs)) # Copy blob list\n # Double loop all blobs\n for blob1 in list(blobs):\n for blob2 in list(blobs):\n if blob1 is not blob2 and checkOverLap(blob1, blob2, threshold):\n if blob1 in blobs: blobs.remove(blob1) # Remove first blob\n if blob2 in blobs: blobs.remove(blob2) # Remove second blob\n x, y = min(blob1.x, blob2.x), min(blob1.y, blob2.y) # Get the minimal blob position\n w, h = max(blob1.w, blob2.w), max(blob1.h, blob2.h) # Get maximum blob dimension\n blobs.append(Blob(x, y, w, h)) # Append new blob to blob array\n return blobs # Return updated array of blobs\n\n\n# Will only be called when running this file\n# Used for testing functions in file\nif __name__ == \"__main__\":\n img = cv2.imread(\"TestImages/Gloves1.png\") # Get image\n maskedImg = ConnectedComponentMethod(10, 2).run(img) # Find green objects in img\n cv2.imshow('Color masking masking - LAB', maskedImg) # Show masked img\n cv2.waitKey(0) # Stop program to see image(s)\n"
] | [
[
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sampathweb/model-viz | [
"33673093d42ae67983be5ac11b01f3f6732b27da"
] | [
"shablona/shablona.py"
] | [
"from __future__ import absolute_import, division, print_function\nimport numpy as np\nimport pandas as pd\nimport scipy.optimize as opt\nfrom scipy.special import erf\nfrom .due import due, Doi\n\n__all__ = [\"Model\", \"Fit\", \"opt_err_func\", \"transform_data\", \"cumgauss\"]\n\n\n# Use duecredit (duecredit.org) to provide a citation to relevant work to\n# be cited. This does nothing, unless the user has duecredit installed,\n# And calls this with duecredit (as in `python -m duecredit script.py`):\ndue.cite(Doi(\"10.1167/13.9.30\"),\n description=\"Template project for small scientific Python projects\",\n tags=[\"reference-implementation\"],\n path='shablona')\n\n\ndef transform_data(data):\n \"\"\"\n Function that takes experimental data and gives us the\n dependent/independent variables for analysis\n\n Parameters\n ----------\n data : Pandas DataFrame or string.\n If this is a DataFrame, it should have the columns `contrast1` and\n `answer` from which the dependent and independent variables will be\n extracted. If this is a string, it should be the full path to a csv\n file that contains data that can be read into a DataFrame with this\n specification.\n\n Returns\n -------\n x : array\n The unique contrast differences.\n y : array\n The proportion of '2' answers in each contrast difference\n n : array\n The number of trials in each x,y condition\n \"\"\"\n if isinstance(data, str):\n data = pd.read_csv(data)\n\n contrast1 = data['contrast1']\n answers = data['answer']\n\n x = np.unique(contrast1)\n y = []\n n = []\n\n for c in x:\n idx = np.where(contrast1 == c)\n n.append(float(len(idx[0])))\n answer1 = len(np.where(answers[idx[0]] == 1)[0])\n y.append(answer1 / n[-1])\n return x, y, n\n\n\ndef cumgauss(x, mu, sigma):\n \"\"\"\n The cumulative Gaussian at x, for the distribution with mean mu and\n standard deviation sigma.\n\n Parameters\n ----------\n x : float or array\n The values of x over which to evaluate the cumulative Gaussian function\n\n mu : float\n The mean parameter. Determines the x value at which the y value is 0.5\n\n sigma : float\n The variance parameter. Determines the slope of the curve at the point\n of Deflection\n\n Returns\n -------\n\n g : float or array\n The cumulative gaussian with mean $\\\\mu$ and variance $\\\\sigma$\n evaluated at all points in `x`.\n\n Notes\n -----\n Based on:\n http://en.wikipedia.org/wiki/Normal_distribution#Cumulative_distribution_function\n\n The cumulative Gaussian function is defined as:\n\n .. math::\n\n \\\\Phi(x) = \\\\frac{1}{2} [1 + erf(\\\\frac{x}{\\\\sqrt{2}})]\n\n Where, $erf$, the error function is defined as:\n\n .. 
math::\n\n erf(x) = \\\\frac{1}{\\\\sqrt{\\\\pi}} \\int_{-x}^{x} e^{t^2} dt\n\n \"\"\"\n return 0.5 * (1 + erf((x - mu) / (np.sqrt(2) * sigma)))\n\n\ndef opt_err_func(params, x, y, func):\n \"\"\"\n Error function for fitting a function using non-linear optimization\n\n Parameters\n ----------\n params : tuple\n A tuple with the parameters of `func` according to their order of\n input\n\n x : float array\n An independent variable.\n\n y : float array\n The dependent variable.\n\n func : function\n A function with inputs: `(x, *params)`\n\n Returns\n -------\n float array\n The marginals of the fit to x/y given the params\n \"\"\"\n return y - func(x, *params)\n\n\nclass Model(object):\n \"\"\" Class for fitting cumulative Gaussian functions to data\"\"\"\n def __init__(self, func=cumgauss):\n \"\"\" Initialize a model object\n\n Parameters\n ----------\n data : Pandas DataFrame\n Data from a subjective contrast judgement experiment\n\n func : callable, optional\n A function that relates x and y through a set of parameters.\n Default: :func:`cumgauss`\n \"\"\"\n self.func = func\n\n def fit(self, x, y, initial=[0.5, 1]):\n \"\"\"\n Fit a Model to data\n\n Parameters\n ----------\n x : float or array\n The independent variable: contrast values presented in the\n experiment\n y : float or array\n The dependent variable\n\n Returns\n -------\n fit : :class:`Fit` instance\n A :class:`Fit` object that contains the parameters of the model.\n\n \"\"\"\n params, _ = opt.leastsq(opt_err_func, initial,\n args=(x, y, self.func))\n return Fit(self, params)\n\n\nclass Fit(object):\n \"\"\"\n Class for representing a fit of a model to data\n \"\"\"\n def __init__(self, model, params):\n \"\"\"\n Initialize a :class:`Fit` object\n\n Parameters\n ----------\n model : a :class:`Model` instance\n An object representing the model used\n\n params : array or list\n The parameters of the model evaluated for the data\n\n \"\"\"\n self.model = model\n self.params = params\n\n def predict(self, x):\n \"\"\"\n Predict values of the dependent variable based on values of the\n indpendent variable.\n\n Parameters\n ----------\n x : float or array\n Values of the independent variable. Can be values presented in\n the experiment. For out-of-sample prediction (e.g. in\n cross-validation), these can be values\n that were not presented in the experiment.\n\n Returns\n -------\n y : float or array\n Predicted values of the dependent variable, corresponding to\n values of the independent variable.\n \"\"\"\n return self.model.func(x, *self.params)\n"
] | [
[
"pandas.read_csv",
"numpy.sqrt",
"numpy.unique",
"scipy.optimize.leastsq",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
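A numeric sketch of the cumulative Gaussian used by shablona.cumgauss above; the x, mu and sigma values are arbitrary illustration numbers.

    import numpy as np
    from scipy.special import erf

    def cumgauss(x, mu, sigma):
        return 0.5 * (1 + erf((x - mu) / (np.sqrt(2) * sigma)))

    print(cumgauss(0.5, mu=0.5, sigma=1.0))   # 0.5 exactly at the mean
    print(cumgauss(1.5, mu=0.5, sigma=1.0))   # ~0.841, one sigma above the mean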
chatcannon/numpy | [
"f1b3f00f7abdd97d59dc5b1c0bb922a692452736"
] | [
"numpy/core/tests/test_scalarmath.py"
] | [
"from __future__ import division, absolute_import, print_function\n\nimport sys\nimport warnings\nimport itertools\nimport operator\n\nimport numpy as np\nfrom numpy.testing.utils import _gen_alignment_data\nfrom numpy.testing import (\n TestCase, run_module_suite, assert_, assert_equal, assert_raises,\n assert_almost_equal, assert_allclose, assert_array_equal, IS_PYPY,\n suppress_warnings\n)\n\ntypes = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,\n np.int_, np.uint, np.longlong, np.ulonglong,\n np.single, np.double, np.longdouble, np.csingle,\n np.cdouble, np.clongdouble]\n\nfloating_types = np.floating.__subclasses__()\n\n\n# This compares scalarmath against ufuncs.\n\nclass TestTypes(TestCase):\n def test_types(self, level=1):\n for atype in types:\n a = atype(1)\n assert_(a == 1, \"error with %r: got %r\" % (atype, a))\n\n def test_type_add(self, level=1):\n # list of types\n for k, atype in enumerate(types):\n a_scalar = atype(3)\n a_array = np.array([3], dtype=atype)\n for l, btype in enumerate(types):\n b_scalar = btype(1)\n b_array = np.array([1], dtype=btype)\n c_scalar = a_scalar + b_scalar\n c_array = a_array + b_array\n # It was comparing the type numbers, but the new ufunc\n # function-finding mechanism finds the lowest function\n # to which both inputs can be cast - which produces 'l'\n # when you do 'q' + 'b'. The old function finding mechanism\n # skipped ahead based on the first argument, but that\n # does not produce properly symmetric results...\n assert_equal(c_scalar.dtype, c_array.dtype,\n \"error with types (%d/'%c' + %d/'%c')\" %\n (k, np.dtype(atype).char, l, np.dtype(btype).char))\n\n def test_type_create(self, level=1):\n for k, atype in enumerate(types):\n a = np.array([1, 2, 3], atype)\n b = atype([1, 2, 3])\n assert_equal(a, b)\n\n def test_leak(self):\n # test leak of scalar objects\n # a leak would show up in valgrind as still-reachable of ~2.6MB\n for i in range(200000):\n np.add(1, 1)\n\n\nclass TestBaseMath(TestCase):\n def test_blocked(self):\n # test alignments offsets for simd instructions\n # alignments for vz + 2 * (vs - 1) + 1\n for dt, sz in [(np.float32, 11), (np.float64, 7), (np.int32, 11)]:\n for out, inp1, inp2, msg in _gen_alignment_data(dtype=dt,\n type='binary',\n max_size=sz):\n exp1 = np.ones_like(inp1)\n inp1[...] = np.ones_like(inp1)\n inp2[...] = np.zeros_like(inp2)\n assert_almost_equal(np.add(inp1, inp2), exp1, err_msg=msg)\n assert_almost_equal(np.add(inp1, 2), exp1 + 2, err_msg=msg)\n assert_almost_equal(np.add(1, inp2), exp1, err_msg=msg)\n\n np.add(inp1, inp2, out=out)\n assert_almost_equal(out, exp1, err_msg=msg)\n\n inp2[...] += np.arange(inp2.size, dtype=dt) + 1\n assert_almost_equal(np.square(inp2),\n np.multiply(inp2, inp2), err_msg=msg)\n # skip true divide for ints\n if dt != np.int32 or (sys.version_info.major < 3 and not sys.py3kwarning):\n assert_almost_equal(np.reciprocal(inp2),\n np.divide(1, inp2), err_msg=msg)\n\n inp1[...] = np.ones_like(inp1)\n np.add(inp1, 2, out=out)\n assert_almost_equal(out, exp1 + 2, err_msg=msg)\n inp2[...] 
= np.ones_like(inp2)\n np.add(2, inp2, out=out)\n assert_almost_equal(out, exp1 + 2, err_msg=msg)\n\n def test_lower_align(self):\n # check data that is not aligned to element size\n # i.e doubles are aligned to 4 bytes on i386\n d = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)\n o = np.zeros(23 * 8, dtype=np.int8)[4:-4].view(np.float64)\n assert_almost_equal(d + d, d * 2)\n np.add(d, d, out=o)\n np.add(np.ones_like(d), d, out=o)\n np.add(d, np.ones_like(d), out=o)\n np.add(np.ones_like(d), d)\n np.add(d, np.ones_like(d))\n\n\nclass TestPower(TestCase):\n def test_small_types(self):\n for t in [np.int8, np.int16, np.float16]:\n a = t(3)\n b = a ** 4\n assert_(b == 81, \"error with %r: got %r\" % (t, b))\n\n def test_large_types(self):\n for t in [np.int32, np.int64, np.float32, np.float64, np.longdouble]:\n a = t(51)\n b = a ** 4\n msg = \"error with %r: got %r\" % (t, b)\n if np.issubdtype(t, np.integer):\n assert_(b == 6765201, msg)\n else:\n assert_almost_equal(b, 6765201, err_msg=msg)\n\n def test_integers_to_negative_integer_power(self):\n # Note that the combination of uint64 with a signed integer\n # has common type np.float. The other combinations should all\n # raise a ValueError for integer ** negative integer.\n exp = [np.array(-1, dt)[()] for dt in 'bhilq']\n\n # 1 ** -1 possible special case\n base = [np.array(1, dt)[()] for dt in 'bhilqBHILQ']\n for i1, i2 in itertools.product(base, exp):\n if i1.dtype.name != 'uint64':\n assert_raises(ValueError, operator.pow, i1, i2)\n else:\n res = operator.pow(i1, i2)\n assert_(res.dtype.type is np.float64)\n assert_almost_equal(res, 1.)\n\n # -1 ** -1 possible special case\n base = [np.array(-1, dt)[()] for dt in 'bhilq']\n for i1, i2 in itertools.product(base, exp):\n if i1.dtype.name != 'uint64':\n assert_raises(ValueError, operator.pow, i1, i2)\n else:\n res = operator.pow(i1, i2)\n assert_(res.dtype.type is np.float64)\n assert_almost_equal(res, -1.)\n\n # 2 ** -1 perhaps generic\n base = [np.array(2, dt)[()] for dt in 'bhilqBHILQ']\n for i1, i2 in itertools.product(base, exp):\n if i1.dtype.name != 'uint64':\n assert_raises(ValueError, operator.pow, i1, i2)\n else:\n res = operator.pow(i1, i2)\n assert_(res.dtype.type is np.float64)\n assert_almost_equal(res, .5)\n\n def test_mixed_types(self):\n typelist = [np.int8, np.int16, np.float16,\n np.float32, np.float64, np.int8,\n np.int16, np.int32, np.int64]\n for t1 in typelist:\n for t2 in typelist:\n a = t1(3)\n b = t2(2)\n result = a**b\n msg = (\"error with %r and %r:\"\n \"got %r, expected %r\") % (t1, t2, result, 9)\n if np.issubdtype(np.dtype(result), np.integer):\n assert_(result == 9, msg)\n else:\n assert_almost_equal(result, 9, err_msg=msg)\n\n def test_modular_power(self):\n # modular power is not implemented, so ensure it errors\n a = 5\n b = 4\n c = 10\n expected = pow(a, b, c)\n for t in (np.int32, np.float32, np.complex64):\n # note that 3-operand power only dispatches on the first argument\n assert_raises(TypeError, operator.pow, t(a), b, c)\n assert_raises(TypeError, operator.pow, np.array(t(a)), b, c)\n\n\nclass TestModulus(TestCase):\n\n floordiv = operator.floordiv\n mod = operator.mod\n\n def test_modulus_basic(self):\n dt = np.typecodes['AllInteger'] + np.typecodes['Float']\n for dt1, dt2 in itertools.product(dt, dt):\n for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):\n if sg1 == -1 and dt1 in np.typecodes['UnsignedInteger']:\n continue\n if sg2 == -1 and dt2 in np.typecodes['UnsignedInteger']:\n continue\n fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: 
%s'\n msg = fmt % (dt1, dt2, sg1, sg2)\n a = np.array(sg1*71, dtype=dt1)[()]\n b = np.array(sg2*19, dtype=dt2)[()]\n div = self.floordiv(a, b)\n rem = self.mod(a, b)\n assert_equal(div*b + rem, a, err_msg=msg)\n if sg2 == -1:\n assert_(b < rem <= 0, msg)\n else:\n assert_(b > rem >= 0, msg)\n\n def test_float_modulus_exact(self):\n # test that float results are exact for small integers. This also\n # holds for the same integers scaled by powers of two.\n nlst = list(range(-127, 0))\n plst = list(range(1, 128))\n dividend = nlst + [0] + plst\n divisor = nlst + plst\n arg = list(itertools.product(dividend, divisor))\n tgt = list(divmod(*t) for t in arg)\n\n a, b = np.array(arg, dtype=int).T\n # convert exact integer results from Python to float so that\n # signed zero can be used, it is checked.\n tgtdiv, tgtrem = np.array(tgt, dtype=float).T\n tgtdiv = np.where((tgtdiv == 0.0) & ((b < 0) ^ (a < 0)), -0.0, tgtdiv)\n tgtrem = np.where((tgtrem == 0.0) & (b < 0), -0.0, tgtrem)\n\n for dt in np.typecodes['Float']:\n msg = 'dtype: %s' % (dt,)\n fa = a.astype(dt)\n fb = b.astype(dt)\n # use list comprehension so a_ and b_ are scalars\n div = [self.floordiv(a_, b_) for a_, b_ in zip(fa, fb)]\n rem = [self.mod(a_, b_) for a_, b_ in zip(fa, fb)]\n assert_equal(div, tgtdiv, err_msg=msg)\n assert_equal(rem, tgtrem, err_msg=msg)\n\n def test_float_modulus_roundoff(self):\n # gh-6127\n dt = np.typecodes['Float']\n for dt1, dt2 in itertools.product(dt, dt):\n for sg1, sg2 in itertools.product((+1, -1), (+1, -1)):\n fmt = 'dt1: %s, dt2: %s, sg1: %s, sg2: %s'\n msg = fmt % (dt1, dt2, sg1, sg2)\n a = np.array(sg1*78*6e-8, dtype=dt1)[()]\n b = np.array(sg2*6e-8, dtype=dt2)[()]\n div = self.floordiv(a, b)\n rem = self.mod(a, b)\n # Equal assertion should hold when fmod is used\n assert_equal(div*b + rem, a, err_msg=msg)\n if sg2 == -1:\n assert_(b < rem <= 0, msg)\n else:\n assert_(b > rem >= 0, msg)\n\n def test_float_modulus_corner_cases(self):\n # Check remainder magnitude.\n for dt in np.typecodes['Float']:\n b = np.array(1.0, dtype=dt)\n a = np.nextafter(np.array(0.0, dtype=dt), -b)\n rem = self.mod(a, b)\n assert_(rem <= b, 'dt: %s' % dt)\n rem = self.mod(-a, -b)\n assert_(rem >= -b, 'dt: %s' % dt)\n\n # Check nans, inf\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning, \"invalid value encountered in remainder\")\n for dt in np.typecodes['Float']:\n fone = np.array(1.0, dtype=dt)\n fzer = np.array(0.0, dtype=dt)\n finf = np.array(np.inf, dtype=dt)\n fnan = np.array(np.nan, dtype=dt)\n rem = self.mod(fone, fzer)\n assert_(np.isnan(rem), 'dt: %s' % dt)\n # MSVC 2008 returns NaN here, so disable the check.\n #rem = self.mod(fone, finf)\n #assert_(rem == fone, 'dt: %s' % dt)\n rem = self.mod(fone, fnan)\n assert_(np.isnan(rem), 'dt: %s' % dt)\n rem = self.mod(finf, fone)\n assert_(np.isnan(rem), 'dt: %s' % dt)\n\n\nclass TestComplexDivision(TestCase):\n def test_zero_division(self):\n with np.errstate(all=\"ignore\"):\n for t in [np.complex64, np.complex128]:\n a = t(0.0)\n b = t(1.0)\n assert_(np.isinf(b/a))\n b = t(complex(np.inf, np.inf))\n assert_(np.isinf(b/a))\n b = t(complex(np.inf, np.nan))\n assert_(np.isinf(b/a))\n b = t(complex(np.nan, np.inf))\n assert_(np.isinf(b/a))\n b = t(complex(np.nan, np.nan))\n assert_(np.isnan(b/a))\n b = t(0.)\n assert_(np.isnan(b/a))\n\n def test_signed_zeros(self):\n with np.errstate(all=\"ignore\"):\n for t in [np.complex64, np.complex128]:\n # tupled (numerator, denominator, expected)\n # for testing as expected == numerator/denominator\n data = (\n (( 
0.0,-1.0), ( 0.0, 1.0), (-1.0,-0.0)),\n (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),\n (( 0.0,-1.0), (-0.0,-1.0), ( 1.0, 0.0)),\n (( 0.0,-1.0), (-0.0, 1.0), (-1.0, 0.0)),\n (( 0.0, 1.0), ( 0.0,-1.0), (-1.0, 0.0)),\n (( 0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),\n ((-0.0,-1.0), ( 0.0,-1.0), ( 1.0,-0.0)),\n ((-0.0, 1.0), ( 0.0,-1.0), (-1.0,-0.0))\n )\n for cases in data:\n n = cases[0]\n d = cases[1]\n ex = cases[2]\n result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))\n # check real and imag parts separately to avoid comparison\n # in array context, which does not account for signed zeros\n assert_equal(result.real, ex[0])\n assert_equal(result.imag, ex[1])\n\n def test_branches(self):\n with np.errstate(all=\"ignore\"):\n for t in [np.complex64, np.complex128]:\n # tupled (numerator, denominator, expected)\n # for testing as expected == numerator/denominator\n data = list()\n\n # trigger branch: real(fabs(denom)) > imag(fabs(denom))\n # followed by else condition as neither are == 0\n data.append((( 2.0, 1.0), ( 2.0, 1.0), (1.0, 0.0)))\n\n # trigger branch: real(fabs(denom)) > imag(fabs(denom))\n # followed by if condition as both are == 0\n # is performed in test_zero_division(), so this is skipped\n\n # trigger else if branch: real(fabs(denom)) < imag(fabs(denom))\n data.append((( 1.0, 2.0), ( 1.0, 2.0), (1.0, 0.0)))\n\n for cases in data:\n n = cases[0]\n d = cases[1]\n ex = cases[2]\n result = t(complex(n[0], n[1])) / t(complex(d[0], d[1]))\n # check real and imag parts separately to avoid comparison\n # in array context, which does not account for signed zeros\n assert_equal(result.real, ex[0])\n assert_equal(result.imag, ex[1])\n\n\nclass TestConversion(TestCase):\n def test_int_from_long(self):\n l = [1e6, 1e12, 1e18, -1e6, -1e12, -1e18]\n li = [10**6, 10**12, 10**18, -10**6, -10**12, -10**18]\n for T in [None, np.float64, np.int64]:\n a = np.array(l, dtype=T)\n assert_equal([int(_m) for _m in a], li)\n\n a = np.array(l[:3], dtype=np.uint64)\n assert_equal([int(_m) for _m in a], li[:3])\n\n def test_iinfo_long_values(self):\n for code in 'bBhH':\n res = np.array(np.iinfo(code).max + 1, dtype=code)\n tgt = np.iinfo(code).min\n assert_(res == tgt)\n\n for code in np.typecodes['AllInteger']:\n res = np.array(np.iinfo(code).max, dtype=code)\n tgt = np.iinfo(code).max\n assert_(res == tgt)\n\n for code in np.typecodes['AllInteger']:\n res = np.typeDict[code](np.iinfo(code).max)\n tgt = np.iinfo(code).max\n assert_(res == tgt)\n\n def test_int_raise_behaviour(self):\n def overflow_error_func(dtype):\n np.typeDict[dtype](np.iinfo(dtype).max + 1)\n\n for code in 'lLqQ':\n assert_raises(OverflowError, overflow_error_func, code)\n\n def test_longdouble_int(self):\n # gh-627\n x = np.longdouble(np.inf)\n assert_raises(OverflowError, x.__int__)\n x = np.clongdouble(np.inf)\n assert_raises(OverflowError, x.__int__)\n\n def test_numpy_scalar_relational_operators(self):\n # All integer\n for dt1 in np.typecodes['AllInteger']:\n assert_(1 > np.array(0, dtype=dt1)[()], \"type %s failed\" % (dt1,))\n assert_(not 1 < np.array(0, dtype=dt1)[()], \"type %s failed\" % (dt1,))\n\n for dt2 in np.typecodes['AllInteger']:\n assert_(np.array(1, dtype=dt1)[()] > np.array(0, dtype=dt2)[()],\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(not np.array(1, dtype=dt1)[()] < np.array(0, dtype=dt2)[()],\n \"type %s and %s failed\" % (dt1, dt2))\n\n #Unsigned integers\n for dt1 in 'BHILQP':\n assert_(-1 < np.array(1, dtype=dt1)[()], \"type %s failed\" % (dt1,))\n assert_(not -1 > np.array(1, dtype=dt1)[()], \"type %s 
failed\" % (dt1,))\n assert_(-1 != np.array(1, dtype=dt1)[()], \"type %s failed\" % (dt1,))\n\n #unsigned vs signed\n for dt2 in 'bhilqp':\n assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(np.array(1, dtype=dt1)[()] != np.array(-1, dtype=dt2)[()],\n \"type %s and %s failed\" % (dt1, dt2))\n\n #Signed integers and floats\n for dt1 in 'bhlqp' + np.typecodes['Float']:\n assert_(1 > np.array(-1, dtype=dt1)[()], \"type %s failed\" % (dt1,))\n assert_(not 1 < np.array(-1, dtype=dt1)[()], \"type %s failed\" % (dt1,))\n assert_(-1 == np.array(-1, dtype=dt1)[()], \"type %s failed\" % (dt1,))\n\n for dt2 in 'bhlqp' + np.typecodes['Float']:\n assert_(np.array(1, dtype=dt1)[()] > np.array(-1, dtype=dt2)[()],\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(not np.array(1, dtype=dt1)[()] < np.array(-1, dtype=dt2)[()],\n \"type %s and %s failed\" % (dt1, dt2))\n assert_(np.array(-1, dtype=dt1)[()] == np.array(-1, dtype=dt2)[()],\n \"type %s and %s failed\" % (dt1, dt2))\n\n def test_scalar_comparison_to_none(self):\n # Scalars should just return False and not give a warnings.\n # The comparisons are flagged by pep8, ignore that.\n with warnings.catch_warnings(record=True) as w:\n warnings.filterwarnings('always', '', FutureWarning)\n assert_(not np.float32(1) == None)\n assert_(not np.str_('test') == None)\n # This is dubious (see below):\n assert_(not np.datetime64('NaT') == None)\n\n assert_(np.float32(1) != None)\n assert_(np.str_('test') != None)\n # This is dubious (see below):\n assert_(np.datetime64('NaT') != None)\n assert_(len(w) == 0)\n\n # For documentation purposes, this is why the datetime is dubious.\n # At the time of deprecation this was no behaviour change, but\n # it has to be considered when the deprecations are done.\n assert_(np.equal(np.datetime64('NaT'), None))\n\n\n#class TestRepr(TestCase):\n# def test_repr(self):\n# for t in types:\n# val = t(1197346475.0137341)\n# val_repr = repr(val)\n# val2 = eval(val_repr)\n# assert_equal( val, val2 )\n\n\nclass TestRepr(object):\n def _test_type_repr(self, t):\n finfo = np.finfo(t)\n last_fraction_bit_idx = finfo.nexp + finfo.nmant\n last_exponent_bit_idx = finfo.nexp\n storage_bytes = np.dtype(t).itemsize*8\n # could add some more types to the list below\n for which in ['small denorm', 'small norm']:\n # Values from http://en.wikipedia.org/wiki/IEEE_754\n constr = np.array([0x00]*storage_bytes, dtype=np.uint8)\n if which == 'small denorm':\n byte = last_fraction_bit_idx // 8\n bytebit = 7-(last_fraction_bit_idx % 8)\n constr[byte] = 1 << bytebit\n elif which == 'small norm':\n byte = last_exponent_bit_idx // 8\n bytebit = 7-(last_exponent_bit_idx % 8)\n constr[byte] = 1 << bytebit\n else:\n raise ValueError('hmm')\n val = constr.view(t)[0]\n val_repr = repr(val)\n val2 = t(eval(val_repr))\n if not (val2 == 0 and val < 1e-100):\n assert_equal(val, val2)\n\n def test_float_repr(self):\n # long double test cannot work, because eval goes through a python\n # float\n for t in [np.float32, np.float64]:\n yield self._test_type_repr, t\n\n\nif not IS_PYPY:\n # sys.getsizeof() is not valid on PyPy\n class TestSizeOf(TestCase):\n\n def test_equal_nbytes(self):\n for type in types:\n x = type(0)\n assert_(sys.getsizeof(x) > x.nbytes)\n\n def test_error(self):\n d = np.float32()\n assert_raises(TypeError, d.__sizeof__, \"a\")\n\n\nclass TestMultiply(TestCase):\n def 
test_seq_repeat(self):\n # Test that basic sequences get repeated when multiplied with\n # numpy integers. And errors are raised when multiplied with others.\n # Some of this behaviour may be controversial and could be open for\n # change.\n for seq_type in (list, tuple):\n seq = seq_type([1, 2, 3])\n for numpy_type in np.typecodes[\"AllInteger\"]:\n i = np.dtype(numpy_type).type(2)\n assert_equal(seq * i, seq * int(i))\n assert_equal(i * seq, int(i) * seq)\n\n for numpy_type in np.typecodes[\"All\"].replace(\"V\", \"\"):\n if numpy_type in np.typecodes[\"AllInteger\"]:\n continue\n i = np.dtype(numpy_type).type()\n assert_raises(TypeError, operator.mul, seq, i)\n assert_raises(TypeError, operator.mul, i, seq)\n\n def test_no_seq_repeat_basic_array_like(self):\n # Test that an array-like which does not know how to be multiplied\n # does not attempt sequence repeat (raise TypeError).\n # See also gh-7428.\n class ArrayLike(object):\n def __init__(self, arr):\n self.arr = arr\n def __array__(self):\n return self.arr\n\n # Test for simple ArrayLike above and memoryviews (original report)\n for arr_like in (ArrayLike(np.ones(3)), memoryview(np.ones(3))):\n assert_array_equal(arr_like * np.float32(3.), np.full(3, 3.))\n assert_array_equal(np.float32(3.) * arr_like, np.full(3, 3.))\n assert_array_equal(arr_like * np.int_(3), np.full(3, 3))\n assert_array_equal(np.int_(3) * arr_like, np.full(3, 3))\n\n\nclass TestNegative(TestCase):\n def test_exceptions(self):\n a = np.ones((), dtype=np.bool_)[()]\n assert_raises(TypeError, operator.neg, a)\n\n def test_result(self):\n types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning)\n for dt in types:\n a = np.ones((), dtype=dt)[()]\n assert_equal(operator.neg(a) + a, 0)\n\n\nclass TestSubtract(TestCase):\n def test_exceptions(self):\n a = np.ones((), dtype=np.bool_)[()]\n assert_raises(TypeError, operator.sub, a, a)\n\n def test_result(self):\n types = np.typecodes['AllInteger'] + np.typecodes['AllFloat']\n with suppress_warnings() as sup:\n sup.filter(RuntimeWarning)\n for dt in types:\n a = np.ones((), dtype=dt)[()]\n assert_equal(operator.sub(a, a), 0)\n\n\nclass TestAbs(TestCase):\n\n def _test_abs_func(self, absfunc):\n for tp in floating_types:\n x = tp(-1.5)\n assert_equal(absfunc(x), 1.5)\n x = tp(0.0)\n res = absfunc(x)\n # assert_equal() checks zero signedness\n assert_equal(res, 0.0)\n x = tp(-0.0)\n res = absfunc(x)\n assert_equal(res, 0.0)\n\n def test_builtin_abs(self):\n self._test_abs_func(abs)\n\n def test_numpy_abs(self):\n self._test_abs_func(np.abs)\n\n\nif __name__ == \"__main__\":\n run_module_suite()\n"
] | [
[
"numpy.floating.__subclasses__",
"numpy.issubdtype",
"numpy.dtype",
"numpy.longdouble",
"numpy.zeros_like",
"numpy.iinfo",
"numpy.clongdouble",
"numpy.str_",
"numpy.where",
"numpy.divide",
"numpy.square",
"numpy.testing.assert_equal",
"numpy.ones_like",
"numpy.testing.suppress_warnings",
"numpy.arange",
"numpy.finfo",
"numpy.testing.utils._gen_alignment_data",
"numpy.testing.assert_almost_equal",
"numpy.full",
"numpy.float32",
"numpy.reciprocal",
"numpy.zeros",
"numpy.multiply",
"numpy.isnan",
"numpy.int_",
"numpy.testing.assert_raises",
"numpy.testing.assert_",
"numpy.errstate",
"numpy.array",
"numpy.testing.run_module_suite",
"numpy.ones",
"numpy.datetime64",
"numpy.add",
"numpy.isinf"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sr-murthy/oedtools | [
"f38d7bef1ffb094b09e779db5b741e63926d2d1e"
] | [
"tests/test_schema.py"
] | [
"import builtins\nimport io\nimport json\nimport os\nimport re\nimport string\nimport sys\nimport time\n\nfrom ast import literal_eval\nfrom collections import OrderedDict\nfrom datetime import datetime\nfrom itertools import groupby\nfrom json import JSONDecodeError\nfrom tempfile import NamedTemporaryFile\nfrom unittest import TestCase\n\nimport numpy as np\nimport pandas as pd\nimport pytest\n\nfrom hypothesis import (\n given,\n settings,\n)\nfrom hypothesis.strategies import (\n just,\n sampled_from,\n text,\n)\n\nfrom oedtools.exceptions import (\n OedError,\n NonOedColumnError,\n NonOedSchemaColumnError,\n NonOedSchemaAndColumnError,\n NonOedSchemaError,\n)\nfrom oedtools.schema import (\n generate_schema,\n get_column_schema,\n get_grouped_master_schema,\n get_schema,\n get_schema_version,\n get_values_profile,\n sample_column,\n SCHEMA_DIR,\n update_schemas,\n)\n\nfrom .data import (\n ACC,\n ALL,\n get_method,\n get_value,\n LOC,\n REINSINFO,\n REINSSCOPE,\n SCHEMA_TYPES,\n SCHEMA_TYPES_EX_MASTER,\n VALUE_GROUPS,\n)\n\n\nclass TestSchema(TestCase):\n\n def setUp(self):\n self.SCHEMA_DIR = SCHEMA_DIR\n\n self.schema_version_fp = os.path.join(self.SCHEMA_DIR, 'schema_version.txt')\n \n self.values_csv_fp = os.path.join(self.SCHEMA_DIR, 'values.csv')\n self.values_json_fp = os.path.join(self.SCHEMA_DIR, 'values.json')\n self.values_profile = get_values_profile()\n \n self.master_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'master_def.csv')\n self.master_json_schema_fp = os.path.join(self.SCHEMA_DIR, 'master_schema.json')\n self.master_schema = get_schema()\n\n self.loc_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'loc_def.csv')\n self.loc_json_schema_fp = os.path.join(self.SCHEMA_DIR, 'loc_schema.json')\n self.loc_schema = get_schema('loc')\n \n self.acc_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'acc_def.csv')\n self.acc_json_schema_fp = os.path.join(self.SCHEMA_DIR, 'acc_schema.json')\n self.acc_schema = get_schema('acc')\n\n self.reinsinfo_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'reinsinfo_def.csv')\n self.reinsinfo_json_schema_fp = os.path.join(self.SCHEMA_DIR, 'reinsinfo_schema.json')\n self.reinsinfo_schema = get_schema('reinsinfo')\n \n self.reinsscope_csv_schema_fp = os.path.join(self.SCHEMA_DIR, 'reinsscope_def.csv')\n self.reinsscope_json_schema_fp = os.path.join(self.SCHEMA_DIR, 'reinsscope_schema.json')\n self.reinsscope_schema = get_schema('reinsscope')\n\n self.csv_to_json_col_map = {\n 'backend_db_field_name': 'oed_db_field_name',\n 'backend_table': 'oed_db_table',\n 'blank': 'blank',\n 'default': 'default',\n 'desc': 'desc',\n 'entity': 'entity',\n 'field_name': 'field_name',\n 'required': 'required',\n 'secmod': 'secmod',\n 'type': 'sql_dtype'\n }\n\n self.json_to_csv_type_col_map = {\n 'unsigned bit': '0 or 1',\n 'unsigned tinyint': 'tinyint',\n 'unsigned smallint': 'smallint',\n 'unsigned int': 'int',\n 'unsigned bigint': 'bigint'\n }\n\n self.json_to_csv_blank_col_map = {\n False: 'NO',\n True: 'YES'\n }\n\n def test_generate_schema__master(self):\n master_df = pd.read_csv(self.master_csv_schema_fp)\n master_df.columns = master_df.columns.str.lower()\n master_df = master_df.where(master_df.notnull(), None)\n master_df = master_df.sort_values(['entity', 'field_name']).reset_index(drop=True)\n\n with NamedTemporaryFile('w') as target_json:\n generate_schema(self.master_csv_schema_fp, target_json.name)\n with io.open(os.path.join('oedtools', 'schema', 'master_schema.json'), 'r', encoding='utf-8') as f: \n res_schema = {literal_eval(k): v for k, v in 
json.load(f).items()}\n\n for exp_key, exp_it in (((it['entity'].lower(), it['field_name'].lower()), it) for _, it in master_df.iterrows()):\n self.assertIn(exp_key, res_schema)\n res_key = exp_key\n res_it = res_schema[res_key]\n self.assertIsInstance(res_it, dict)\n self.assertEqual(exp_it['backend_db_field_name'], res_it[self.csv_to_json_col_map['backend_db_field_name']])\n self.assertEqual(exp_it['backend_table'], res_it[self.csv_to_json_col_map['backend_table']])\n self.assertEqual(exp_it['blank'], self.json_to_csv_blank_col_map[res_it[self.csv_to_json_col_map['blank']]])\n if res_it['py_dtype'] is not None:\n self.assertEqual(\n getattr(builtins, res_it['py_dtype'])(exp_it['default'] if res_it['py_dtype'] != 'bool' else int(exp_it['default'])) if exp_it['default'] is not None else None,\n res_it[self.csv_to_json_col_map['default']]\n )\n self.assertEqual(exp_it['desc'], res_it[self.csv_to_json_col_map['desc']])\n self.assertEqual(exp_it['entity'], res_it[self.csv_to_json_col_map['entity']])\n self.assertEqual(exp_it['field_name'], res_it[self.csv_to_json_col_map['field_name']])\n self.assertEqual(exp_it['required'], res_it[self.csv_to_json_col_map['required']])\n self.assertEqual(\n exp_it['type'],\n (\n self.json_to_csv_type_col_map[res_it['sql_dtype']]\n if res_it['sql_dtype'].endswith('int') or res_it['sql_dtype'].endswith('bit')\n else res_it['sql_dtype']\n )\n )\n\n def test_generate_schema__loc(self):\n loc_df = pd.read_csv(self.loc_csv_schema_fp)\n loc_df.columns = loc_df.columns.str.lower()\n loc_df = loc_df.where(loc_df.notnull(), None)\n loc_df = loc_df.sort_values(['entity', 'field_name']).reset_index(drop=True)\n\n with NamedTemporaryFile('w') as target_json:\n generate_schema(self.loc_csv_schema_fp, target_json.name)\n with io.open(os.path.join('oedtools', 'schema', 'loc_schema.json'), 'r', encoding='utf-8') as f: \n res_schema = {literal_eval(k): v for k, v in json.load(f).items()}\n\n for exp_key, exp_it in (((it['entity'].lower(), it['field_name'].lower()), it) for _, it in loc_df.iterrows()):\n self.assertIn(exp_key, res_schema)\n res_key = exp_key\n res_it = res_schema[res_key]\n self.assertIsInstance(res_it, dict)\n self.assertEqual(exp_it['backend_db_field_name'], res_it[self.csv_to_json_col_map['backend_db_field_name']])\n self.assertEqual(exp_it['backend_table'], res_it[self.csv_to_json_col_map['backend_table']])\n self.assertEqual(exp_it['blank'], self.json_to_csv_blank_col_map[res_it[self.csv_to_json_col_map['blank']]])\n if res_it['py_dtype'] is not None:\n self.assertEqual(\n getattr(builtins, res_it['py_dtype'])(exp_it['default'] if res_it['py_dtype'] != 'bool' else int(exp_it['default'])) if exp_it['default'] is not None else None,\n res_it[self.csv_to_json_col_map['default']]\n )\n self.assertEqual(exp_it['desc'], res_it[self.csv_to_json_col_map['desc']])\n self.assertEqual(exp_it['entity'], res_it[self.csv_to_json_col_map['entity']])\n self.assertEqual(exp_it['field_name'], res_it[self.csv_to_json_col_map['field_name']])\n self.assertEqual(exp_it['required'], res_it[self.csv_to_json_col_map['required']])\n self.assertEqual(\n exp_it['type'],\n (\n self.json_to_csv_type_col_map[res_it['sql_dtype']]\n if res_it['sql_dtype'].endswith('int') or res_it['sql_dtype'].endswith('bit')\n else res_it['sql_dtype']\n )\n )\n\n def test_generate_schema__acc(self):\n acc_df = pd.read_csv(self.acc_csv_schema_fp)\n acc_df.columns = acc_df.columns.str.lower()\n acc_df = acc_df.where(acc_df.notnull(), None)\n acc_df = acc_df.sort_values(['entity', 
'field_name']).reset_index(drop=True)\n\n with NamedTemporaryFile('w') as target_json:\n generate_schema(self.acc_csv_schema_fp, target_json.name)\n with io.open(os.path.join('oedtools', 'schema', 'acc_schema.json'), 'r', encoding='utf-8') as f: \n res_schema = {literal_eval(k): v for k, v in json.load(f).items()}\n\n for exp_key, exp_it in (((it['entity'].lower(), it['field_name'].lower()), it) for _, it in acc_df.iterrows()):\n self.assertIn(exp_key, res_schema)\n res_key = exp_key\n res_it = res_schema[res_key]\n self.assertIsInstance(res_it, dict)\n self.assertEqual(exp_it['backend_db_field_name'], res_it[self.csv_to_json_col_map['backend_db_field_name']])\n self.assertEqual(exp_it['backend_table'], res_it[self.csv_to_json_col_map['backend_table']])\n self.assertEqual(exp_it['blank'], self.json_to_csv_blank_col_map[res_it[self.csv_to_json_col_map['blank']]])\n if res_it['py_dtype'] is not None:\n self.assertEqual(\n getattr(builtins, res_it['py_dtype'])(exp_it['default'] if res_it['py_dtype'] != 'bool' else int(exp_it['default'])) if exp_it['default'] is not None else None,\n res_it[self.csv_to_json_col_map['default']]\n )\n self.assertEqual(exp_it['desc'], res_it[self.csv_to_json_col_map['desc']])\n self.assertEqual(exp_it['entity'], res_it[self.csv_to_json_col_map['entity']])\n self.assertEqual(exp_it['field_name'], res_it[self.csv_to_json_col_map['field_name']])\n self.assertEqual(exp_it['required'], res_it[self.csv_to_json_col_map['required']])\n self.assertEqual(\n exp_it['type'],\n (\n self.json_to_csv_type_col_map[res_it['sql_dtype']]\n if res_it['sql_dtype'].endswith('int') or res_it['sql_dtype'].endswith('bit')\n else res_it['sql_dtype']\n )\n )\n\n def test_generate_schema__reinsinfo(self):\n reinsinfo_df = pd.read_csv(self.reinsinfo_csv_schema_fp)\n reinsinfo_df.columns = reinsinfo_df.columns.str.lower()\n reinsinfo_df = reinsinfo_df.where(reinsinfo_df.notnull(), None)\n reinsinfo_df = reinsinfo_df.sort_values(['entity', 'field_name']).reset_index(drop=True)\n\n with NamedTemporaryFile('w') as target_json:\n generate_schema(self.reinsinfo_csv_schema_fp, target_json.name)\n with io.open(os.path.join('oedtools', 'schema', 'reinsinfo_schema.json'), 'r', encoding='utf-8') as f: \n res_schema = {literal_eval(k): v for k, v in json.load(f).items()}\n\n for exp_key, exp_it in (((it['entity'].lower(), it['field_name'].lower()), it) for _, it in reinsinfo_df.iterrows()):\n self.assertIn(exp_key, res_schema)\n res_key = exp_key\n res_it = res_schema[res_key]\n self.assertIsInstance(res_it, dict)\n self.assertEqual(exp_it['backend_db_field_name'], res_it[self.csv_to_json_col_map['backend_db_field_name']])\n self.assertEqual(exp_it['backend_table'], res_it[self.csv_to_json_col_map['backend_table']])\n self.assertEqual(exp_it['blank'], self.json_to_csv_blank_col_map[res_it[self.csv_to_json_col_map['blank']]])\n if res_it['py_dtype']:\n self.assertEqual(\n getattr(builtins, res_it['py_dtype'])(exp_it['default'] if res_it['py_dtype'] != 'bool' else int(exp_it['default'])) if exp_it['default'] is not None else None,\n res_it[self.csv_to_json_col_map['default']]\n )\n self.assertEqual(exp_it['desc'], res_it[self.csv_to_json_col_map['desc']])\n self.assertEqual(exp_it['entity'], res_it[self.csv_to_json_col_map['entity']])\n self.assertEqual(exp_it['field_name'], res_it[self.csv_to_json_col_map['field_name']])\n self.assertEqual(exp_it['required'], res_it[self.csv_to_json_col_map['required']])\n self.assertEqual(\n exp_it['type'],\n (\n self.json_to_csv_type_col_map[res_it['sql_dtype']]\n if 
res_it['sql_dtype'].endswith('int') or res_it['sql_dtype'].endswith('bit')\n else res_it['sql_dtype']\n )\n )\n\n def test_generate_schema__reinsscope(self):\n reinsscope_df = pd.read_csv(self.reinsscope_csv_schema_fp)\n reinsscope_df.columns = reinsscope_df.columns.str.lower()\n reinsscope_df = reinsscope_df.where(reinsscope_df.notnull(), None)\n reinsscope_df = reinsscope_df.sort_values(['entity', 'field_name']).reset_index(drop=True)\n\n with NamedTemporaryFile('w') as target_json:\n generate_schema(self.reinsscope_csv_schema_fp, target_json.name)\n with io.open(os.path.join('oedtools', 'schema', 'reinsscope_schema.json'), 'r', encoding='utf-8') as f: \n res_schema = {literal_eval(k): v for k, v in json.load(f).items()}\n\n for exp_key, exp_it in (((it['entity'].lower(), it['field_name'].lower()), it) for _, it in reinsscope_df.iterrows()):\n self.assertIn(exp_key, res_schema)\n res_key = exp_key\n res_it = res_schema[res_key]\n self.assertIsInstance(res_it, dict)\n self.assertEqual(exp_it['backend_db_field_name'], res_it[self.csv_to_json_col_map['backend_db_field_name']])\n self.assertEqual(exp_it['backend_table'], res_it[self.csv_to_json_col_map['backend_table']])\n self.assertEqual(exp_it['blank'], self.json_to_csv_blank_col_map[res_it[self.csv_to_json_col_map['blank']]])\n if res_it['py_dtype']:\n self.assertEqual(\n getattr(builtins, res_it['py_dtype'])(exp_it['default'] if res_it['py_dtype'] != 'bool' else int(exp_it['default'])) if exp_it['default'] is not None else None,\n res_it[self.csv_to_json_col_map['default']]\n )\n self.assertEqual(exp_it['desc'], res_it[self.csv_to_json_col_map['desc']])\n self.assertEqual(exp_it['entity'], res_it[self.csv_to_json_col_map['entity']])\n self.assertEqual(exp_it['field_name'], res_it[self.csv_to_json_col_map['field_name']])\n self.assertEqual(exp_it['required'], res_it[self.csv_to_json_col_map['required']])\n self.assertEqual(\n exp_it['type'],\n (\n self.json_to_csv_type_col_map[res_it['sql_dtype']]\n if res_it['sql_dtype'].endswith('int') or res_it['sql_dtype'].endswith('bit')\n else res_it['sql_dtype']\n )\n )\n\n def test_get_schema__master(self):\n with io.open(self.master_json_schema_fp, 'r', encoding='utf-8') as f:\n exp_schema = OrderedDict({\n literal_eval(k): (\n v if not v['dtype_range']\n else {_k: (\n _v if not _k == 'dtype_range'\n else (\n range(_v['start'], _v['stop']) if v['py_dtype'] == 'int' and isinstance(_v, dict) and 'start' in _v and 'stop' in _v\n else _v\n )\n ) for _k, _v in v.items()\n }\n )\n for k, v in json.load(f).items()\n })\n exp_schema = OrderedDict({\n k: (\n v if not isinstance(v.get('column_validation'), dict) or 'start' not in v['column_validation']\n else {**v, **{'column_validation': v['dtype_range']}}\n )\n for k, v in exp_schema.items()\n })\n \n res_schema = get_schema()\n\n self.assertEqual(exp_schema, res_schema)\n\n def test_get_schema__loc(self):\n with io.open(self.loc_json_schema_fp, 'r', encoding='utf-8') as f:\n exp_schema = OrderedDict({\n literal_eval(k): (\n v if not v['dtype_range']\n else {_k: (\n _v if not _k == 'dtype_range'\n else (\n range(_v['start'], _v['stop']) if v['py_dtype'] == 'int' and isinstance(_v, dict) and 'start' in _v and 'stop' in _v\n else _v\n )\n ) for _k, _v in v.items()\n }\n )\n for k, v in json.load(f).items()\n })\n exp_schema = OrderedDict({\n k: (\n v if not isinstance(v.get('column_validation'), dict) or 'start' not in v['column_validation']\n else {**v, **{'column_validation': v['dtype_range']}}\n )\n for k, v in exp_schema.items()\n })\n \n res_schema = 
get_schema('loc')\n\n self.assertEqual(exp_schema, res_schema)\n\n def test_get_schema__acc(self):\n with io.open(self.acc_json_schema_fp, 'r', encoding='utf-8') as f:\n exp_schema = OrderedDict({\n literal_eval(k): (\n v if not v['dtype_range']\n else {_k: (\n _v if not _k == 'dtype_range'\n else (\n range(_v['start'], _v['stop']) if v['py_dtype'] == 'int' and isinstance(_v, dict) and 'start' in _v and 'stop' in _v\n else _v\n )\n ) for _k, _v in v.items()\n }\n )\n for k, v in json.load(f).items()\n })\n exp_schema = OrderedDict({\n k: (\n v if not isinstance(v.get('column_validation'), dict) or 'start' not in v['column_validation']\n else {**v, **{'column_validation': v['dtype_range']}}\n )\n for k, v in exp_schema.items()\n })\n \n res_schema = get_schema('acc')\n\n self.assertEqual(exp_schema, res_schema)\n\n def test_get_schema__reinsinfo(self):\n with io.open(self.reinsinfo_json_schema_fp, 'r', encoding='utf-8') as f:\n exp_schema = OrderedDict({\n literal_eval(k): (\n v if not v['dtype_range']\n else {_k: (\n _v if not _k == 'dtype_range'\n else (\n range(_v['start'], _v['stop']) if v['py_dtype'] == 'int' and isinstance(_v, dict) and 'start' in _v and 'stop' in _v\n else _v\n )\n ) for _k, _v in v.items()\n }\n )\n for k, v in json.load(f).items()\n })\n exp_schema = OrderedDict({\n k: (\n v if not isinstance(v.get('column_validation'), dict) or 'start' not in v['column_validation']\n else {**v, **{'column_validation': v['dtype_range']}}\n )\n for k, v in exp_schema.items()\n })\n \n res_schema = get_schema('reinsinfo')\n\n self.assertEqual(exp_schema, res_schema)\n\n def test_get_schema__reinsscope(self):\n with io.open(self.reinsscope_json_schema_fp, 'r', encoding='utf-8') as f:\n exp_schema = OrderedDict({\n literal_eval(k): (\n v if not v['dtype_range']\n else {_k: (\n _v if not _k == 'dtype_range'\n else (\n range(_v['start'], _v['stop']) if v['py_dtype'] == 'int' and isinstance(_v, dict) and 'start' in _v and 'stop' in _v\n else _v\n )\n ) for _k, _v in v.items()\n }\n )\n for k, v in json.load(f).items()\n })\n exp_schema = OrderedDict({\n k: (\n v if not isinstance(v.get('column_validation'), dict) or 'start' not in v['column_validation']\n else {**v, **{'column_validation': v['dtype_range']}}\n )\n for k, v in exp_schema.items()\n })\n \n res_schema = get_schema('reinsscope')\n\n self.assertEqual(exp_schema, res_schema)\n\n def test_get_grouped_master_schema(self):\n with io.open(self.master_json_schema_fp, 'r', encoding='utf-8') as f:\n exp_master_schema = OrderedDict({\n literal_eval(k): (\n v if not v['dtype_range']\n else {_k: (\n _v if not _k == 'dtype_range'\n else (\n range(_v['start'], _v['stop']) if v['py_dtype'] == 'int' and isinstance(_v, dict) and 'start' in _v and 'stop' in _v\n else _v\n )\n ) for _k, _v in v.items()\n }\n )\n for k, v in json.load(f).items()\n })\n exp_master_schema = OrderedDict({\n k: (\n v if not isinstance(v.get('column_validation'), dict) or 'start' not in v['column_validation']\n else {**v, **{'column_validation': v['dtype_range']}}\n )\n for k, v in exp_master_schema.items()\n })\n exp_grouped_master_schema = { \n schema_type: { \n item_key[1]: item \n for item_key, item in schema_items \n } \n for schema_type, schema_items in groupby(exp_master_schema.items(), key=lambda it: it[0][0]) \n }\n\n res_grouped_master_schema = get_grouped_master_schema()\n self.assertEqual(exp_grouped_master_schema, res_grouped_master_schema)\n\n def test_get_schema_version(self):\n with io.open(self.schema_version_fp, 'r', encoding='utf-8') as f:\n 
exp_version = f.readlines()[0].strip()\n\n res_version = get_schema_version()\n self.assertEqual(exp_version, res_version)\n\n @given(\n column=text(alphabet=string.ascii_lowercase, min_size=1, max_size=50)\n )\n def test_get_column_schema__master_schema_type__raises_oed_error(self, column):\n with self.assertRaises(OedError):\n get_column_schema('master', column)\n\n @given(\n schema_type=sampled_from(SCHEMA_TYPES_EX_MASTER),\n column=sampled_from([t[1] for t in ALL]),\n random_str=text(alphabet=string.ascii_lowercase, min_size=1)\n )\n def test_get_column_schema__invalid_schema_type_and_column__raises_non_oed_schema_and_column_error(self, schema_type, column, random_str):\n with self.assertRaises(NonOedSchemaAndColumnError):\n get_column_schema(schema_type + random_str, column + random_str)\n\n @given(\n schema_type=sampled_from(SCHEMA_TYPES_EX_MASTER),\n column=sampled_from([t[1] for t in ALL]),\n random_str=text(alphabet=string.ascii_lowercase, min_size=1)\n )\n def test_get_column_schema__invalid_schema_type_but_valid_column__raises_non_oed_schema_error(self, schema_type, column, random_str):\n with self.assertRaises(NonOedSchemaError):\n get_column_schema(schema_type + random_str, column)\n\n @given(\n schema_type=sampled_from(SCHEMA_TYPES_EX_MASTER)\n )\n def test_get_column_schema__valid_schema_type_but_invalid_schema_column__raises_non_oed_schema_column_error(self, schema_type):\n column = np.random.choice([\n col for stype, col_dict in get_grouped_master_schema().items()\n for col in col_dict\n if stype != schema_type\n ])\n with self.assertRaises(NonOedSchemaColumnError):\n get_column_schema(schema_type, column)\n\n @given(\n schema_type=sampled_from(SCHEMA_TYPES_EX_MASTER),\n column=sampled_from([t[1] for t in ALL]),\n random_str=text(alphabet=string.ascii_lowercase, min_size=1)\n )\n def test_get_column_schema__valid_schema_type_but_invalid_column__raises_non_oed_column_error(self, schema_type, column, random_str):\n with self.assertRaises(NonOedColumnError):\n get_column_schema(schema_type, column + random_str)\n\n @given(\n column=sampled_from(ACC)\n )\n def test_get_column_schema__valid_acc_column(self, column):\n exp_col_schema = self.acc_schema[('acc', column.lower())]\n res_col_schema = get_column_schema('acc', column.lower())\n self.assertEqual(exp_col_schema, res_col_schema)\n\n @given(\n column=sampled_from(LOC)\n )\n def test_get_column_schema__valid_loc_column(self, column):\n exp_col_schema = self.loc_schema[('loc', column.lower())]\n res_col_schema = get_column_schema('loc', column.lower())\n self.assertEqual(exp_col_schema, res_col_schema)\n\n @given(\n column=sampled_from(REINSINFO)\n )\n def test_get_column_schema__valid_reinsinfo_column(self, column):\n exp_col_schema = self.reinsinfo_schema[('reinsinfo', column.lower())]\n res_col_schema = get_column_schema('reinsinfo', column.lower())\n self.assertEqual(exp_col_schema, res_col_schema)\n\n @given(\n column=sampled_from(REINSSCOPE)\n )\n def test_get_column_schema__valid_reinsscope_column(self, column):\n exp_col_schema = self.reinsscope_schema[('reinsscope', column.lower())]\n res_col_schema = get_column_schema('reinsscope', column.lower())\n self.assertEqual(exp_col_schema, res_col_schema)\n\n def test_update_schemas(self):\n values_profile_last_modified = datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'values.json')).st_mtime)\n master_schema_last_modified = datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'master_schema.json')).st_mtime)\n loc_schema_last_modified = 
datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'loc_schema.json')).st_mtime)\n acc_schema_last_modified = datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'acc_schema.json')).st_mtime)\n reinsinfo_schema_last_modified = datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'reinsinfo_schema.json')).st_mtime)\n reinsscope_schema_last_modified = datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'reinsscope_schema.json')).st_mtime)\n\n update_schemas()\n\n self.assertTrue(\n datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'values.json')).st_mtime) >\n values_profile_last_modified\n )\n self.assertTrue(\n datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'master_schema.json')).st_mtime) >\n master_schema_last_modified\n )\n self.assertTrue(\n datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'loc_schema.json')).st_mtime) >\n loc_schema_last_modified\n )\n self.assertTrue(\n datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'acc_schema.json')).st_mtime) >\n acc_schema_last_modified\n )\n self.assertTrue(\n datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'reinsinfo_schema.json')).st_mtime) >\n reinsinfo_schema_last_modified\n )\n self.assertTrue(\n datetime.fromtimestamp(os.stat(os.path.join(SCHEMA_DIR, 'reinsscope_schema.json')).st_mtime) >\n reinsscope_schema_last_modified\n )\n\n @given(\n schema_key=sampled_from(ALL)\n )\n def test_sample_column(self, schema_key):\n schema_type, header = schema_key\n col_schema = self.master_schema[(schema_type, header)]\n exp_py_dtype, use_range = getattr(builtins, col_schema['py_dtype']), col_schema['column_range'] or col_schema['dtype_range']\n validation_src = col_schema['column_validation']\n validation_func = (\n get_method(validation_src.replace('func:', '')) if isinstance(validation_src, str) and validation_src.startswith('func:')\n else None\n )\n\n sample = sample_column(schema_type, header)\n\n if exp_py_dtype in [int, float] and validation_func is not None:\n self.assertTrue(all(validation_func(use_range, value) for value in sample))\n elif exp_py_dtype is int:\n self.assertTrue(all(value in use_range for value in sample))\n elif exp_py_dtype is float:\n self.assertTrue(all(value >= min(use_range) and value <= max(use_range) for value in sample))\n if exp_py_dtype is str and use_range is not None and validation_func is not None:\n self.assertTrue(all(validation_func(use_range, get_value(value)) for value in sample))\n elif exp_py_dtype is str and use_range is not None:\n self.assertTrue(all(get_value(value) in use_range for value in sample))\n elif exp_py_dtype is str:\n self.assertTrue(all(isinstance(value, str) for value in sample))\n\n"
] | [
[
"pandas.read_csv"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nhh1501/E2E_MLT_VN | [
"dabbc1f7e12b45fbaef965200217cca41793dbc3"
] | [
"train.py"
] | [
"'''\nCreated on Sep 3, 2017\n\n@author: Michal.Busta at gmail.com\n'''\n\nimport torch, os\nimport numpy as np\nimport cv2\ndevice='cuda'\nimport net_utils\nimport data_gen\nfrom data_gen import draw_box_points\nimport timeit\nimport math\nimport random\nimport time\nfrom models import ModelResNetSep2 , ModelResNetSep_final\nimport torch.autograd as autograd\nimport torch.nn.functional as F\n# from torch_baidu_ctc import ctc_loss, CTCLoss\nimport torch.nn as nn\n# from warpctc_pytorch import CTCLoss\nfrom ocr_test_utils import print_seq_ext\nfrom net_eval import evaluate_e2e\n\nimport unicodedata as ud\nimport ocr_gen\nfrom torch import optim\n\nlr_decay = 0.99\nmomentum = 0.9\nweight_decay = 0\nbatch_per_epoch = 1000\ndisp_interval = 100\n\nnorm_height = 44\n\nf = open('codec.txt', 'r',encoding='utf-8')\ncodec = f.readlines()[0]\n#codec = u' !\"#$%&\\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_abcdefghijklmnopqrstuvwxyz{|}~£ÁČĎÉĚÍŇÓŘŠŤÚŮÝŽáčďéěíňóřšťúůýž'\ncodec_rev = {}\nindex = 4\nfor i in range(0, len(codec)):\n codec_rev[codec[i]] = index\n index += 1\nf.close()\n\ndef intersect(a, b):\n '''Determine the intersection of two rectangles'''\n rect = (0,0,0,0)\n r0 = max(a[0],b[0])\n c0 = max(a[1],b[1])\n r1 = min(a[2],b[2])\n c1 = min(a[3],b[3])\n # Do we have a valid intersection?\n if r1 > r0 and c1 > c0: \n rect = (r0,c0,r1,c1)\n return rect\n\ndef union(a, b):\n r0 = min(a[0],b[0])\n c0 = min(a[1],b[1])\n r1 = max(a[2],b[2])\n c1 = max(a[3],b[3])\n return (r0,c0,r1,c1)\n\ndef area(a):\n '''Computes rectangle area'''\n width = a[2] - a[0]\n height = a[3] - a[1]\n return width * height\n \ndef process_boxes(images, im_data, iou_pred, roi_pred, angle_pred, score_maps, gt_idxs, gtso, lbso, features, net, ctc_loss, opts, debug = False):\n \n ctc_loss_count = 0\n loss = torch.from_numpy(np.asarray([0])).type(torch.FloatTensor).to(device)\n \n for bid in range(iou_pred.size(0)):\n \n gts = gtso[bid]\n lbs = lbso[bid]\n \n gt_proc = 0\n gt_good = 0\n \n gts_count = {}\n\n iou_pred_np = iou_pred[bid].data.cpu().numpy()\n iou_map = score_maps[bid]\n to_walk = iou_pred_np.squeeze(0) * iou_map * (iou_pred_np.squeeze(0) > 0.5)\n \n roi_p_bid = roi_pred[bid].data.cpu().numpy()\n gt_idx = gt_idxs[bid]\n \n if debug:\n img = images[bid]\n if opts.normalize:\n img += 1\n img *= 128#\n img = np.asarray(img, dtype=np.uint8)\n \n xy_text = np.argwhere(to_walk > 0)\n random.shuffle(xy_text)\n xy_text = xy_text[0:min(xy_text.shape[0], 100)]\n \n \n for i in range(0, xy_text.shape[0]):\n if opts.geo_type == 1:\n break\n pos = xy_text[i, :]\n \n gt_id = gt_idx[pos[0], pos[1]]\n \n if not gt_id in gts_count:\n gts_count[gt_id] = 0\n \n if gts_count[gt_id] > 2:\n continue\n \n gt = gts[gt_id]\n gt_txt = lbs[gt_id]\n if gt_txt.startswith('##'):\n continue\n \n angle_sin = angle_pred[bid, 0, pos[0], pos[1]] \n angle_cos = angle_pred[bid, 1, pos[0], pos[1]] \n \n angle = math.atan2(angle_sin, angle_cos)\n \n angle_gt = ( math.atan2((gt[2][1] - gt[1][1]), gt[2][0] - gt[1][0]) + math.atan2((gt[3][1] - gt[0][1]), gt[3][0] - gt[0][0]) ) / 2\n \n if math.fabs(angle_gt - angle) > math.pi / 16:\n continue\n \n offset = roi_p_bid[:, pos[0], pos[1]]\n posp = pos + 0.25\n pos_g = np.array([(posp[1] - offset[0] * math.sin(angle)) * 4, (posp[0] - offset[0] * math.cos(angle)) * 4 ])\n pos_g2 = np.array([ (posp[1] + offset[1] * math.sin(angle)) * 4, (posp[0] + offset[1] * math.cos(angle)) * 4 ])\n \n pos_r = np.array([(posp[1] - offset[2] * math.cos(angle)) * 4, (posp[0] - offset[2] * math.sin(angle)) * 4 ])\n 
pos_r2 = np.array([(posp[1] + offset[3] * math.cos(angle)) * 4, (posp[0] + offset[3] * math.sin(angle)) * 4 ])\n \n center = (pos_g + pos_g2 + pos_r + pos_r2) / 2 - [4*pos[1], 4*pos[0]] \n #center = (pos_g + pos_g2 + pos_r + pos_r2) / 4\n dw = pos_r - pos_r2\n dh = pos_g - pos_g2\n \n w = math.sqrt(dw[0] * dw[0] + dw[1] * dw[1])\n h = math.sqrt(dh[0] * dh[0] + dh[1] * dh[1]) \n \n dhgt = gt[1] - gt[0]\n \n h_gt = math.sqrt(dhgt[0] * dhgt[0] + dhgt[1] * dhgt[1])\n if h_gt < 10:\n continue\n \n rect = ( (center[0], center[1]), (w, h), angle * 180 / math.pi )\n pts = cv2.boxPoints(rect)\n \n pred_bbox = cv2.boundingRect(pts)\n pred_bbox = [pred_bbox[0], pred_bbox[1], pred_bbox[2], pred_bbox[3]]\n pred_bbox[2] += pred_bbox[0]\n pred_bbox[3] += pred_bbox[1]\n \n if gt[:, 0].max() > im_data.size(3) or gt[:, 1].max() > im_data.size(3):\n continue \n \n gt_bbox = [gt[:, 0].min(), gt[:, 1].min(), gt[:, 0].max(), gt[:, 1].max()]\n inter = intersect(pred_bbox, gt_bbox)\n \n uni = union(pred_bbox, gt_bbox)\n ratio = area(inter) / float(area(uni)) \n \n if ratio < 0.90:\n continue\n \n hratio = min(h, h_gt) / max(h, h_gt)\n if hratio < 0.5:\n continue\n \n input_W = im_data.size(3)\n input_H = im_data.size(2)\n target_h = norm_height \n \n scale = target_h / h \n target_gw = (int(w * scale) + target_h // 2)\n target_gw = max(8, int(round(target_gw / 4)) * 4) \n \n #show pooled image in image layer\n \n scalex = (w + h // 2) / input_W \n scaley = h / input_H \n\n \n th11 = scalex * math.cos(angle)\n th12 = -math.sin(angle) * scaley\n th13 = (2 * center[0] - input_W - 1) / (input_W - 1) #* torch.cos(angle_var) - (2 * yc - input_H - 1) / (input_H - 1) * torch.sin(angle_var)\n \n th21 = math.sin(angle) * scalex \n th22 = scaley * math.cos(angle) \n th23 = (2 * center[1] - input_H - 1) / (input_H - 1) #* torch.cos(angle_var) + (2 * xc - input_W - 1) / (input_W - 1) * torch.sin(angle_var)\n \n \n t = np.asarray([th11, th12, th13, th21, th22, th23], dtype=np.float)\n t = torch.from_numpy(t).type(torch.FloatTensor).to(device)\n \n #t = torch.stack((th11, th12, th13, th21, th22, th23), dim=1)\n theta = t.view(-1, 2, 3)\n \n grid = F.affine_grid(theta, torch.Size((1, 3, int(target_h), int(target_gw))))\n \n x = F.grid_sample(im_data[bid].unsqueeze(0), grid)\n\n h2 = 2 * h\n scalex = (w + int(h2)) / input_W\n scaley = h2 / input_H\n\n th11 = scalex * math.cos(angle_gt)\n th12 = -math.sin(angle_gt) * scaley\n th13 = (2 * center[0] - input_W - 1) / (input_W - 1) #* torch.cos(angle_var) - (2 * yc - input_H - 1) / (input_H - 1) * torch.sin(angle_var)\n\n th21 = math.sin(angle_gt) * scalex\n th22 = scaley * math.cos(angle_gt)\n th23 = (2 * center[1] - input_H - 1) / (input_H - 1) #* torch.cos(angle_var) + (2 * xc - input_W - 1) / (input_W - 1) * torch.sin(angle_var)\n\n\n t = np.asarray([th11, th12, th13, th21, th22, th23], dtype=np.float)\n t = torch.from_numpy(t).type(torch.FloatTensor)\n t = t.to(device)\n theta = t.view(-1, 2, 3)\n \n grid2 = F.affine_grid(theta, torch.Size((1, 3, int( 2 * target_h), int(target_gw + target_h ))))\n x2 = F.grid_sample(im_data[bid].unsqueeze(0), grid2)\n \n if debug:\n x_c = x.data.cpu().numpy()[0]\n x_data_draw = x_c.swapaxes(0, 2)\n x_data_draw = x_data_draw.swapaxes(0, 1)\n if opts.normalize:\n x_data_draw += 1\n x_data_draw *= 128#\n x_data_draw = np.asarray(x_data_draw, dtype=np.uint8)\n x_data_draw = x_data_draw[:, :, ::-1]\n \n cv2.circle(img, (int(center[0]), int(center[1])), 5, (0, 255, 0)) \n cv2.imshow('im_data', x_data_draw)\n \n draw_box_points(img, pts)\n 
draw_box_points(img, gt, color=(0, 0, 255))\n \n cv2.imshow('img', img)\n cv2.waitKey(100)\n \n gt_labels = []\n gt_labels.append( codec_rev[' '] )\n for k in range(len(gt_txt)):\n if gt_txt[k] in codec_rev: \n gt_labels.append( codec_rev[gt_txt[k]] )\n else:\n print('Unknown char: {0}'.format(gt_txt[k]) )\n print(gt_txt)\n gt_labels.append( 3 )\n \n if 'ARABIC' in ud.name(gt_txt[0]):\n gt_labels = gt_labels[::-1]\n gt_labels.append( codec_rev[' '] )\n \n \n features = net.forward_features(x)\n labels_pred = net.forward_ocr(features)\n\n fs2 = net.forward_features(x2)\n offset = (fs2.size(2) - features.size(2)) // 2\n offset2 = (fs2.size(3) - features.size(3)) // 2\n fs2 = fs2[:, :, offset:(features.size(2) + offset), offset2:-offset2]\n labels_pred2 = net.forward_ocr(fs2)\n \n label_length = []\n label_length.append(len(gt_labels))\n probs_sizes = autograd.Variable(torch.IntTensor( [(labels_pred.permute(2,0,1).size()[0])] * (labels_pred.permute(2,0,1).size()[1]) )).long()\n label_sizes = autograd.Variable(torch.IntTensor( torch.from_numpy(np.array(label_length)).int() )).long()\n labels = autograd.Variable(torch.IntTensor( torch.from_numpy(np.array(gt_labels)).int() )).long() \n \n loss = loss + ctc_loss(labels_pred.permute(2,0,1), labels, probs_sizes, label_sizes).to(device)\n loss = loss + ctc_loss(labels_pred2.permute(2,0,1), labels, probs_sizes, label_sizes).to(device)\n ctc_loss_count += 1\n \n if debug:\n ctc_f = labels_pred.data.cpu().numpy()\n ctc_f = ctc_f.swapaxes(1, 2)\n \n labels = ctc_f.argmax(2)\n det_text, conf, dec_s, splits = print_seq_ext(labels[0, :], codec) \n \n print('{0} \\t {1}'.format(det_text, gt_txt))\n \n gts_count[gt_id] += 1\n \n if ctc_loss_count > 64 or debug:\n break\n \n for gt_id in range(0, len(gts)):\n \n gt = gts[gt_id]\n gt_txt = lbs[gt_id]\n \n gt_txt_low = gt_txt.lower()\n if gt_txt.startswith('##'):\n continue\n \n if gt[:, 0].max() > im_data.size(3) or gt[:, 1].max() > im_data.size(3) :\n continue \n \n if gt.min() < 0:\n continue\n \n center = (gt[0, :] + gt[1, :] + gt[2, :] + gt[3, :]) / 4\n dw = gt[2, :] - gt[1, :]\n dh = gt[1, :] - gt[0, :] \n \n w = math.sqrt(dw[0] * dw[0] + dw[1] * dw[1])\n h = math.sqrt(dh[0] * dh[0] + dh[1] * dh[1]) + random.randint(-2, 2)\n \n if h < 8:\n #print('too small h!')\n continue\n \n angle_gt = ( math.atan2((gt[2][1] - gt[1][1]), gt[2][0] - gt[1][0]) + math.atan2((gt[3][1] - gt[0][1]), gt[3][0] - gt[0][0]) ) / 2\n \n input_W = im_data.size(3)\n input_H = im_data.size(2)\n target_h = norm_height \n \n scale = target_h / h \n target_gw = int(w * scale) + random.randint(0, int(target_h)) \n target_gw = max(8, int(round(target_gw / 4)) * 4) \n \n xc = center[0] \n yc = center[1] \n w2 = w \n h2 = h \n \n #show pooled image in image layer\n \n scalex = (w2 + random.randint(0, int(h2))) / input_W \n scaley = h2 / input_H \n \n th11 = scalex * math.cos(angle_gt)\n th12 = -math.sin(angle_gt) * scaley\n th13 = (2 * xc - input_W - 1) / (input_W - 1) #* torch.cos(angle_var) - (2 * yc - input_H - 1) / (input_H - 1) * torch.sin(angle_var)\n \n th21 = math.sin(angle_gt) * scalex \n th22 = scaley * math.cos(angle_gt) \n th23 = (2 * yc - input_H - 1) / (input_H - 1) #* torch.cos(angle_var) + (2 * xc - input_W - 1) / (input_W - 1) * torch.sin(angle_var)\n \n \n t = np.asarray([th11, th12, th13, th21, th22, th23], dtype=np.float)\n t = torch.from_numpy(t).type(torch.FloatTensor)\n t = t.to(device)\n theta = t.view(-1, 2, 3)\n \n grid = F.affine_grid(theta, torch.Size((1, 3, int(target_h ), int(target_gw))))\n x = 
F.grid_sample(im_data[bid].unsqueeze(0), grid)\n \n #score_sampled = F.grid_sample(iou_pred[bid].unsqueeze(0), grid)\n \n gt_labels = []\n gt_labels.append(codec_rev[' '])\n for k in range(len(gt_txt)):\n if gt_txt[k] in codec_rev: \n gt_labels.append( codec_rev[gt_txt[k]] )\n else:\n print('Unknown char: {0}'.format(gt_txt[k]) )\n print(gt_txt)\n gt_labels.append( 3 )\n gt_labels.append(codec_rev[' '])\n \n if 'ARABIC' in ud.name(gt_txt[0]):\n gt_labels = gt_labels[::-1]\n \n features = net.forward_features(x)\n labels_pred = net.forward_ocr(features)\n \n label_length = []\n label_length.append(len(gt_labels))\n probs_sizes = torch.IntTensor( [(labels_pred.permute(2,0,1).size()[0])] * (labels_pred.permute(2,0,1).size()[1]) ).long()\n label_sizes = torch.IntTensor( torch.from_numpy(np.array(label_length)).int() ).long()\n labels = torch.IntTensor( torch.from_numpy(np.array(gt_labels)).int() ).long()\n \n loss = loss + ctc_loss(labels_pred.permute(2,0,1), labels, probs_sizes, label_sizes).to(device)\n ctc_loss_count += 1\n \n if debug:\n x_d = x.data.cpu().numpy()[0]\n x_data_draw = x_d.swapaxes(0, 2)\n x_data_draw = x_data_draw.swapaxes(0, 1)\n if opts.normalize:\n x_data_draw += 1\n x_data_draw *= 128#\n x_data_draw = np.asarray(x_data_draw, dtype=np.uint8)\n x_data_draw = x_data_draw[:, :, ::-1]\n cv2.imshow('im_data_gt', x_data_draw)\n cv2.waitKey(100)\n \n gt_proc += 1\n if True:\n ctc_f = labels_pred.data.cpu().numpy()\n ctc_f = ctc_f.swapaxes(1, 2)\n \n labels = ctc_f.argmax(2)\n det_text, conf, dec_s, splits = print_seq_ext(labels[0, :], codec) \n if debug:\n print('{0} \\t {1}'.format(det_text, gt_txt))\n if det_text.lower() == gt_txt.lower():\n gt_good += 1\n \n if ctc_loss_count > 128 or debug:\n break \n \n if ctc_loss_count > 0:\n loss /= ctc_loss_count\n \n return loss, gt_good , gt_proc\n \n \ndef main(opts):\n \n model_name = 'E2E-MLT'\n # net = ModelResNetSep2(attention=True)\n net = ModelResNetSep_final(attention=True)\n print(\"Using {0}\".format(model_name))\n \n learning_rate = opts.base_lr\n if opts.cuda:\n net.to(device)\n optimizer = torch.optim.Adam(net.parameters(), lr=opts.base_lr, weight_decay=weight_decay)\n scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode='max',factor=0.5 ,patience=3,verbose=True)\n step_start = 0 \n if os.path.exists(opts.model):\n print('loading model from %s' % args.model)\n # net_dict = net.state_dict()\n step_start, learning_rate = net_utils.load_net(args.model, net, optimizer)\n# step_start, learning_rate = net_utils.load_net(args.model, net, None)\n# \n# step_start = 0\n if opts.cuda:\n net.to(device)\n \n net.train()\n\n data_generator = data_gen.get_batch(num_workers=opts.num_readers, \n input_size=opts.input_size, batch_size=opts.batch_size, \n train_list=opts.train_path, geo_type=opts.geo_type, normalize= opts.normalize)\n \n dg_ocr = ocr_gen.get_batch(num_workers=2,\n batch_size=opts.ocr_batch_size, \n train_list=opts.ocr_feed_list, in_train=True, norm_height=norm_height, rgb=True, normalize= opts.normalize)\n \n train_loss = 0\n train_loss_temp = 0\n bbox_loss, seg_loss, angle_loss = 0., 0., 0.\n cnt = 0\n \n \n \n \n ctc_loss = nn.CTCLoss().to(device)\n # ctc_loss = CTCLoss()\n\n \n \n \n ctc_loss_val = 0\n ctc_loss_val2 = 0\n box_loss_val = 0\n good_all = 0\n gt_all = 0\n train_loss_lr = 0\n ctc_loss_lr = 0\n cntt = 0\n time_total = 0\n now = time.time()\n \n for step in range(step_start, opts.max_iters):\n \n # batch\n images, image_fns, score_maps, geo_maps, training_masks, gtso, lbso, gt_idxs = 
next(data_generator)\n im_data = net_utils.np_to_variable(images, is_cuda=opts.cuda).permute(0, 3, 1, 2)\n start = timeit.timeit()\n # cv2.imshow('img', images)\n try:\n seg_pred, roi_pred, angle_pred, features = net(im_data)\n except:\n import sys, traceback\n traceback.print_exc(file=sys.stdout)\n continue\n end = timeit.timeit()\n \n # backward\n\n smaps_var = net_utils.np_to_variable(score_maps, is_cuda=opts.cuda)\n training_mask_var = net_utils.np_to_variable(training_masks, is_cuda=opts.cuda)\n angle_gt = net_utils.np_to_variable(geo_maps[:, :, :, 4], is_cuda=opts.cuda)\n geo_gt = net_utils.np_to_variable(geo_maps[:, :, :, [0, 1, 2, 3]], is_cuda=opts.cuda)\n \n try:\n #? loss\n loss = net.loss(seg_pred, smaps_var, training_mask_var, angle_pred, angle_gt, roi_pred, geo_gt)\n except:\n import sys, traceback\n traceback.print_exc(file=sys.stdout)\n continue\n\n #@ loss_val\n if not (torch.isnan(loss) or torch.isinf(loss)):\n train_loss_temp += loss.data.cpu().numpy()\n\n optimizer.zero_grad()\n \n try:\n \n if step > 10000 or True: #this is just extra augumentation step ... in early stage just slows down training\n ctcl, gt_b_good, gt_b_all = process_boxes(images, im_data, seg_pred[0], roi_pred[0], angle_pred[0], score_maps, gt_idxs, gtso, lbso, features, net, ctc_loss, opts, debug=opts.debug)\n \n #? loss\n loss = loss + ctcl\n gt_all += gt_b_all\n good_all += gt_b_good \n \n imageso, labels, label_length = next(dg_ocr)\n im_data_ocr = net_utils.np_to_variable(imageso, is_cuda=opts.cuda).permute(0, 3, 1, 2)\n features = net.forward_features(im_data_ocr)\n labels_pred = net.forward_ocr(features)\n \n probs_sizes = torch.IntTensor( [(labels_pred.permute(2,0,1).size()[0])] * (labels_pred.permute(2,0,1).size()[1]) ).long()\n label_sizes = torch.IntTensor( torch.from_numpy(np.array(label_length)).int() ).long()\n labels = torch.IntTensor( torch.from_numpy(np.array(labels)).int() ).long()\n loss_ocr = ctc_loss(labels_pred.permute(2,0,1), labels, probs_sizes, label_sizes) / im_data_ocr.size(0) * 0.5\n \n loss_ocr.backward()\n #@ loss_val\n # ctc_loss_val2 += loss_ocr.item()\n\n loss.backward()\n\n clipping_value = 0.5\n torch.nn.utils.clip_grad_norm_(net.parameters(), clipping_value)\n if opts.d1:\n print('loss_nan', torch.isnan(loss))\n print('loss_inf', torch.isinf(loss))\n print('lossocr_nan', torch.isnan(loss_ocr))\n print('lossocr_inf', torch.isinf(loss_ocr))\n \n if not (torch.isnan(loss) or torch.isinf(loss) or torch.isnan(loss_ocr) or torch.isinf(loss_ocr)): \n bbox_loss += net.box_loss_value.data.cpu().numpy()\n seg_loss += net.segm_loss_value.data.cpu().numpy()\n angle_loss += net.angle_loss_value.data.cpu().numpy()\n train_loss += train_loss_temp\n ctc_loss_val2 += loss_ocr.item()\n ctc_loss_val += ctcl.data.cpu().numpy()[0]\n # train_loss += loss.data.cpu().numpy()[0] #net.bbox_loss.data.cpu().numpy()[0]\n optimizer.step()\n train_loss_temp = 0\n cnt += 1\n\n except:\n import sys, traceback\n traceback.print_exc(file=sys.stdout)\n pass\n \n \n if step % disp_interval == 0:\n \n if opts.debug:\n \n segm = seg_pred[0].data.cpu()[0].numpy()\n segm = segm.squeeze(0)\n cv2.imshow('segm_map', segm)\n \n segm_res = cv2.resize(score_maps[0], (images.shape[2], images.shape[1]))\n mask = np.argwhere(segm_res > 0)\n \n x_data = im_data.data.cpu().numpy()[0]\n x_data = x_data.swapaxes(0, 2)\n x_data = x_data.swapaxes(0, 1)\n \n if opts.normalize:\n x_data += 1\n x_data *= 128\n x_data = np.asarray(x_data, dtype=np.uint8)\n x_data = x_data[:, :, ::-1]\n \n im_show = x_data\n try:\n 
im_show[mask[:, 0], mask[:, 1], 1] = 255 \n im_show[mask[:, 0], mask[:, 1], 0] = 0 \n im_show[mask[:, 0], mask[:, 1], 2] = 0\n except:\n pass\n \n cv2.imshow('img0', im_show) \n cv2.imshow('score_maps', score_maps[0] * 255)\n cv2.imshow('train_mask', training_masks[0] * 255)\n cv2.waitKey(10)\n \n \n train_loss /= cnt\n bbox_loss /= cnt\n seg_loss /= cnt\n angle_loss /= cnt\n ctc_loss_val /= cnt\n ctc_loss_val2 /= cnt\n box_loss_val /= cnt\n# train_loss_lr += (ctc_loss_val + ctc_loss_val2 + train_loss)\n train_loss_lr += (train_loss)\n\n cntt += 1\n time_now = time.time() - now\n time_total += time_now\n now = time.time()\n save_log = os.path.join(opts.save_path,'loss.txt')\n # f = open('content/drive/My_Drive/DATA_OCR/backup/ca ca/loss.txt','a')\n f = open(save_log,'a')\n f.write('epoch %d[%d], loss: %.3f, bbox_loss: %.3f, seg_loss: %.3f, ang_loss: %.3f, ctc_loss: %.3f, rec: %.5f, lv2: %.3f, time: %.2f s, cnt: %d\\n' % (\n step / batch_per_epoch, step, train_loss, bbox_loss, seg_loss, angle_loss, ctc_loss_val, good_all / max(1, gt_all), ctc_loss_val2, time_now, cnt))\n f.close()\n try:\n \n print('epoch %d[%d], loss: %.3f, bbox_loss: %.3f, seg_loss: %.3f, ang_loss: %.3f, ctc_loss: %.3f, rec: %.5f, lv2: %.3f, time: %.2f s,, cnt: %d' % (\n step / batch_per_epoch, step, train_loss, bbox_loss, seg_loss, angle_loss, ctc_loss_val, good_all / max(1, gt_all), ctc_loss_val2, time_now, cnt))\n except:\n import sys, traceback\n traceback.print_exc(file=sys.stdout)\n pass\n \n\n train_loss = 0\n bbox_loss, seg_loss, angle_loss = 0., 0., 0.\n cnt = 0\n ctc_loss_val = 0\n ctc_loss_val2 = 0\n good_all = 0\n gt_all = 0\n box_loss_val = 0\n \n #if step % valid_interval == 0:\n # validate(opts.valid_list, net)\n if step > step_start and (step % batch_per_epoch == 0):\n # CER,WER = evaluate_crnn(e2edataloader,net)\n for param_group in optimizer.param_groups:\n learning_rate = param_group['lr']\n print('learning_rate', learning_rate)\n save_name = os.path.join(opts.save_path, '{}_{}.h5'.format(model_name, step))\n state = {'step': step,\n 'learning_rate': learning_rate,\n 'state_dict': net.state_dict(),\n 'optimizer': optimizer.state_dict()}\n torch.save(state, save_name)\n\n # evaluate\n re_tpe2e, re_tp, re_e1, precision = evaluate_e2e(root=args.eval_path, net=net, norm_height=44,\n name_model=save_name, normalize=args.normalize,\n save_dir=args.save_path,save = False)\n # CER,WER = evaluate_crnn(e2edataloader,net)\n\n scheduler.step(re_tpe2e)\n f = open('content/drive/My_Drive/DATA_OCR/backup/ca ca/loss.txt','a')\n f.write('time epoch [%d]: %.2f s, loss_total: %.3f, re_tpe2e = %f, re_tp = %f, re_e1 = %f, precision = %f\\n' % (\n step / batch_per_epoch, time_total, train_loss_lr / cntt, re_tpe2e, re_tp, re_e1, precision))\n f.close()\n print('time epoch [%d]: %.2f s, loss_total: %.3f, re_tpe2e = %f, re_tp = %f, re_e1 = %f, precision = %f' % (\n step / batch_per_epoch, time_total, train_loss_lr / cntt, re_tpe2e, re_tp, re_e1, precision))\n # print('time epoch [%d]: %.2f s, loss_total: %.3f' % (step / batch_per_epoch, time_total,train_loss_lr/cntt))\n print('save model: {}'.format(save_name))\n time_total = 0\n cntt = 0\n train_loss_lr = 0\n # ctc_loss_lr = 0\n\n\nimport argparse\n\nif __name__ == '__main__': \n parser = argparse.ArgumentParser()\n parser.add_argument('-train_path', default='/content/data_mlt')\n parser.add_argument('-ocr_feed_list', default='/content/data_MLT_crop/gt_vi.txt')\n parser.add_argument('-save_path', default='/content/drive/My Drive/DATA_OCR/backup')\n 
parser.add_argument('-model', default='e2e-mlt.h5')\n parser.add_argument('-debug', type=int, default=0)\n parser.add_argument('-batch_size', type=int, default=8)\n parser.add_argument('-ocr_batch_size', type=int, default=256)\n parser.add_argument('-num_readers', type=int, default=5)\n parser.add_argument('-cuda', type=bool, default=True)\n parser.add_argument('-input_size', type=int, default=512)\n parser.add_argument('-geo_type', type=int, default=0)\n parser.add_argument('-base_lr', type=float, default=0.0001)\n parser.add_argument('-max_iters', type=int, default=5)\n parser.add_argument('-d1', type=int, default=1)\n parser.add_argument('-normalize', type=bool, default=True)\n args = parser.parse_args() \n main(args)\n \n"
] | [
[
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"torch.isinf",
"torch.isnan",
"numpy.asarray",
"torch.from_numpy",
"numpy.argwhere",
"torch.nn.CTCLoss",
"numpy.array",
"torch.save"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nicolasm/lastfm-export | [
"43456161c7083b490d09a1d2638a38c9771e1b3f"
] | [
"lfmpandas/artist_counts.py"
] | [
"from datetime import datetime\n\nimport pandas\n\nfrom queries.artist_counts import get_artist_counts_query_overall, \\\n get_artist_counts_query_year\nfrom lfmdb.lfmdb import select\nfrom lfmconf.lfmconf import get_lastfm_conf\n\nconf = get_lastfm_conf()\nstart_year = conf['lastfm']['service']['startYear']\nnow = datetime.now()\nyears = range(start_year, now.year + 1)\n\nmonths = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct',\n 'Nov', 'Dec']\n\n\ndef retrieve_artist_counts(artist_name):\n queries = get_artist_counts_query_overall()\n\n rows = select(queries[0], (artist_name,))\n df_albums = pandas.DataFrame(rows,\n columns=['AlbumName', 'PlayCount'],\n dtype='int64')\n df_albums = df_albums.sort_values(by='PlayCount')\n\n rows = select(queries[1], (artist_name,))\n df_artist = pandas.DataFrame(rows,\n columns=['Year', 'PlayCount'],\n dtype='int64')\n df_artist['Year'] = df_artist['Year'].astype('int')\n df_artist = df_artist.set_index('Year')\n\n df_artist = df_artist.reindex(years, fill_value=0)\\\n .rename_axis('Year')\\\n .reset_index()\n\n df_artist = df_artist.sort_values(by='Year', ascending=False)\n\n return df_albums, df_artist\n\n\ndef retrieve_artist_counts_for_year(artist_name, year):\n queries = get_artist_counts_query_year()\n\n rows = select(queries[0], (artist_name, year))\n df_albums = pandas.DataFrame(rows,\n columns=['AlbumName', 'PlayCount'],\n dtype='int64')\n df_albums = df_albums.sort_values(by='PlayCount')\n\n rows = select(queries[1], (artist_name, year))\n df_artist = pandas.DataFrame(rows,\n columns=['YearMonth', 'PlayCount'],\n dtype='int64')\n df_artist = df_artist.set_index('YearMonth')\n\n year_months = ['%s-%02d' % (year, m) for m in range(1, 13)]\n\n df_artist = df_artist.reindex(year_months, fill_value=0) \\\n .rename_axis('YearMonth') \\\n .reset_index()\n\n df_artist = df_artist.sort_values(by='YearMonth')\n\n return df_albums, df_artist\n\n\ndef month_name(month):\n return months[month - 1]\n"
] | [
[
"pandas.DataFrame"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
czyssrs/Logic2Text | [
"b17c931844513f0ff08c8a800fc7968899d69912"
] | [
"gpt_base/utils.py"
] | [
"import time, os, sys, shutil, io, subprocess, re\nimport tensorflow as tf\nimport numpy as np\nimport zipfile\nimport json\nimport copy\n\n# Progress bar\n\nTOTAL_BAR_LENGTH = 100.\nlast_time = time.time()\nbegin_time = last_time\nprint(os.popen('stty size', 'r').read())\n_, term_width = os.popen('stty size', 'r').read().split()\nterm_width = int(term_width)\n\n\ndef bleu_score(labels_file, predictions_path):\n bleu_script = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'multi-bleu.perl')\n try:\n with io.open(predictions_path, encoding=\"utf-8\", mode=\"r\") as predictions_file:\n bleu_out = subprocess.check_output(\n [bleu_script, labels_file],\n stdin=predictions_file,\n stderr=subprocess.STDOUT)\n bleu_out = bleu_out.decode(\"utf-8\")\n bleu_score = re.search(r\"BLEU = (.+?),\", bleu_out).group(1)\n print(bleu_score)\n return float(bleu_score)\n\n except subprocess.CalledProcessError as error:\n if error.output is not None:\n msg = error.output.strip()\n tf.logging.warning(\n \"{} script returned non-zero exit code: {}\".format(bleu_script, msg))\n return None\n\ndef read_word2vec_zip(word2vec_file):\n wordvec_map = {}\n num_words = 0\n dimension = 0\n zfile = zipfile.ZipFile(word2vec_file)\n for finfo in zfile.infolist():\n ifile = zfile.open(finfo)\n for line in ifile:\n line = line.strip()\n #print line\n entries = line.split(' ')\n if len(entries) == 2:\n continue\n word = entries[0].strip()\n vec = map(float, entries[1:])\n\n if word in wordvec_map:\n print (\"Invalid word in embedding. Does not matter.\")\n continue\n assert dimension == 0 or dimension == len(vec)\n\n wordvec_map[word] = np.array(vec)\n num_words += 1\n dimension = len(vec)\n\n return wordvec_map, num_words, dimension\n\ndef read_word2vec(word2vec_file):\n wordvec_map = {}\n num_words = 0\n dimension = 0\n with open(word2vec_file, \"r\") as f:\n for line in f:\n line = line.strip()\n #print line\n entries = line.split(' ')\n if len(entries) == 2:\n continue\n word = entries[0].strip()\n vec = map(float, entries[1:])\n\n if word in wordvec_map:\n print (\"Invalid word in embedding. 
Does not matter.\")\n continue\n # assert word not in wordvec_map\n assert dimension == 0 or dimension == len(vec)\n\n wordvec_map[word] = np.array(vec)\n num_words += 1\n dimension = len(vec)\n\n return wordvec_map, num_words, dimension\n\ndef load_vocab(vocab_file):\n vocab = {}\n\n vocab['<_PAD>'] = 0\n vocab['<_START_TOKEN>'] = 1\n vocab['<_END_TOKEN>'] = 2\n vocab['<_UNK_TOKEN>'] = 3\n\n cnt = 4\n with open(vocab_file, \"r\") as v:\n for line in v:\n if len(line.strip().split()) > 1:\n word = line.strip().split()[0]\n ori_id = int(line.strip().split()[1])\n if word not in vocab:\n vocab[word] = (cnt + ori_id)\n\n return vocab\n\ndef create_init_embedding(vocab_file, extend_vocab_size, word2vec_file, emblen):\n '''\n create initial embedding for text relation words.\n words not in word2vec file initialized to random.\n\n key_map['PAD'] = 0\n key_map['START_TOKEN'] = 1\n key_map['END_TOKEN'] = 2\n key_map['UNK_TOKEN'] = 3\n '''\n\n vocab = load_vocab(vocab_file)\n print(\"vocab len: \", len(vocab))\n\n init_embedding = np.random.uniform(-np.sqrt(3), np.sqrt(3), size = (len(vocab) + extend_vocab_size, emblen))\n\n if word2vec_file.endswith('.gz'):\n word2vec_map = KeyedVectors.load_word2vec_format(word2vec_file, binary=True)\n elif word2vec_file.endswith('.zip'):\n word2vec_map, num_words, dimension = read_word2vec_zip(word2vec_file)\n else:\n word2vec_map, num_words, dimension = read_word2vec(word2vec_file)\n\n num_covered = 0\n\n for word in vocab:\n if word in word2vec_map:\n vec = word2vec_map[word]\n if len(vec) != emblen:\n raise ValueError(\"word2vec dimension doesn't match.\")\n init_embedding[vocab[word], :] = vec\n num_covered += 1\n\n unk_vec = init_embedding[3, :]\n for ind in range(len(vocab), len(init_embedding)):\n init_embedding[ind, :] = unk_vec\n\n ## embedding for pad\n # init_embedding[0][:] = np.zeros(emblen)\n\n print (\"word2vec covered: %d\" % num_covered)\n return init_embedding\n\ndef progress_bar(current, total, msg=None):\n global last_time, begin_time\n if current == 0:\n begin_time = time.time() # Reset for new bar.\n\n cur_len = int(TOTAL_BAR_LENGTH*current/total)\n rest_len = int(TOTAL_BAR_LENGTH - cur_len) - 1\n\n sys.stdout.write(' [')\n for i in range(cur_len):\n sys.stdout.write('=')\n sys.stdout.write('>')\n for i in range(rest_len):\n sys.stdout.write('.')\n sys.stdout.write(']')\n\n cur_time = time.time()\n step_time = cur_time - last_time\n last_time = cur_time\n tot_time = cur_time - begin_time\n\n L = []\n L.append(' Step: %s' % format_time(step_time))\n L.append(' | Tot: %s' % format_time(tot_time))\n if msg:\n L.append(' | ' + msg)\n\n msg = ''.join(L)\n sys.stdout.write(msg)\n for i in range(term_width-int(TOTAL_BAR_LENGTH)-len(msg)-3):\n sys.stdout.write(' ')\n\n # Go back to the center of the bar.\n for i in range(term_width-int(TOTAL_BAR_LENGTH/2)+2):\n sys.stdout.write('\\b')\n sys.stdout.write(' %d/%d ' % (current+1, total))\n\n if current < total-1:\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n sys.stdout.flush()\n\n\ndef format_time(seconds):\n days = int(seconds / 3600/24)\n seconds = seconds - days*3600*24\n hours = int(seconds / 3600)\n seconds = seconds - hours*3600\n minutes = int(seconds / 60)\n seconds = seconds - minutes*60\n secondsf = int(seconds)\n seconds = seconds - secondsf\n millis = int(seconds*1000)\n\n f = ''\n i = 1\n if days > 0:\n f += str(days) + 'D'\n i += 1\n if hours > 0 and i <= 2:\n f += str(hours) + 'h'\n i += 1\n if minutes > 0 and i <= 2:\n f += str(minutes) + 'm'\n i += 1\n if secondsf > 0 
and i <= 2:\n f += str(secondsf) + 's'\n i += 1\n if millis > 0 and i <= 2:\n f += str(millis) + 'ms'\n i += 1\n if f == '':\n f = '0ms'\n return f\n\n\ndef write_word(pred_list, save_dir, name):\n ss = open(save_dir + name, \"w+\")\n for item in pred_list:\n ss.write(\" \".join(item) + '\\n')\n\n\ndef get_current_git_version():\n import git\n repo = git.Repo(search_parent_directories=True)\n sha = repo.head.object.hexsha\n return sha\n\n\ndef write_log(log_file, s):\n print(s)\n with open(log_file, 'a') as f:\n f.write(s+'\\n')\n\n#### for rouge error:\ndef check_res(res):\n if res.strip() == \"\":\n return False\n for token in res.strip():\n if token.isalpha():\n return True\n\n return False\n\ndef make_html_safe(s):\n \"\"\"Replace any angled brackets in string s to avoid interfering with HTML attention visualizer.\"\"\"\n s.replace(\"<\", \"<\")\n s.replace(\">\", \">\")\n return s\n\ndef get_rouge(res_sum):\n\n sum_words = res_sum.split(\" \")\n sum_sents = []\n while len(sum_words) > 0:\n try:\n fst_period_idx = sum_words.index(\".\")\n except ValueError: # there is text remaining that doesn't end in \".\"\n fst_period_idx = len(sum_words)\n sent = sum_words[:fst_period_idx+1] # sentence up to and including the period\n sum_words = sum_words[fst_period_idx+1:] # everything else\n sum_sents.append(' '.join(sent))\n\n sum_sents = [make_html_safe(w) for w in sum_sents]\n\n return sum_sents\n\ndef get_res(ori_file, gen_file, out_file):\n\n f_ori = open(ori_file)\n f_gen = open(gen_file)\n\n data = json.load(f_ori)\n generated = f_gen.readlines()\n\n assert len(data) == len(generated)\n\n res = []\n\n for tup_ori, tup_gen in zip(data, generated):\n\n to_write = copy.deepcopy(tup_ori)\n to_write[\"generated\"] = tup_gen.strip()\n res.append(to_write)\n\n with open(out_file, \"w\") as f_out:\n f_out.write(json.dumps(res, indent=4))\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n"
] | [
[
"numpy.array",
"numpy.sqrt"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
samjabrahams/tensorflow-workshop | [
"4f1598a342954977e949ce0f5bd65b5994f0bec3"
] | [
"inception_retrain.py"
] | [
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Simple transfer learning with an Inception v3 architecture model which\ndisplays summaries in TensorBoard.\n\nThis example shows how to take a Inception v3 architecture model trained on\nImageNet images, and train a new top layer that can recognize other classes of\nimages.\n\nThe top layer receives as input a 2048-dimensional vector for each image. We\ntrain a softmax layer on top of this representation. Assuming the softmax layer\ncontains N labels, this corresponds to learning N + 2048*N model parameters\ncorresponding to the learned biases and weights.\n\nHere's an example, which assumes you have a folder containing class-named\nsubfolders, each full of images for each label. The example folder flower_photos\nshould have a structure like this:\n\n~/flower_photos/daisy/photo1.jpg\n~/flower_photos/daisy/photo2.jpg\n...\n~/flower_photos/rose/anotherphoto77.jpg\n...\n~/flower_photos/sunflower/somepicture.jpg\n\nThe subfolder names are important, since they define what label is applied to\neach image, but the filenames themselves don't matter. Once your images are\nprepared, you can run the training with a command like this:\n\nbazel build third_party/tensorflow/examples/image_retraining:retrain && \\\nbazel-bin/third_party/tensorflow/examples/image_retraining/retrain \\\n--image_dir ~/flower_photos\n\nYou can replace the image_dir argument with any folder containing subfolders of\nimages. 
The label for each image is taken from the name of the subfolder it's\nin.\n\nThis produces a new model file that can be loaded and run by any TensorFlow\nprogram, for example the label_image sample code.\n\n\nTo use with TensorBoard:\n\nBy default, this script will log summaries to /tmp/retrain_logs directory\n\nVisualize the summaries with this command:\n\ntensorboard --logdir /tmp/retrain_logs\n\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport glob\nimport hashlib\nimport os.path\nimport random\nimport re\nimport sys\nimport tarfile\n\nimport numpy as np\nfrom six.moves import urllib\nimport tensorflow as tf\n\nfrom tensorflow.python.framework import graph_util\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.platform import gfile\n\n\nimport struct\n\nFLAGS = tf.app.flags.FLAGS\n\n# Input and output file flags.\ntf.app.flags.DEFINE_string('image_dir', '',\n \"\"\"Path to folders of labeled images.\"\"\")\ntf.app.flags.DEFINE_string('output_graph', '/tmp/output_graph.pb',\n \"\"\"Where to save the trained graph.\"\"\")\ntf.app.flags.DEFINE_string('output_labels', '/tmp/output_labels.txt',\n \"\"\"Where to save the trained graph's labels.\"\"\")\ntf.app.flags.DEFINE_string('summaries_dir', '/tmp/retrain_logs',\n \"\"\"Where to save summary logs for TensorBoard.\"\"\")\n\n# Details of the training configuration.\ntf.app.flags.DEFINE_integer('how_many_training_steps', 4000,\n \"\"\"How many training steps to run before ending.\"\"\")\ntf.app.flags.DEFINE_float('learning_rate', 0.01,\n \"\"\"How large a learning rate to use when training.\"\"\")\ntf.app.flags.DEFINE_integer(\n 'testing_percentage', 10,\n \"\"\"What percentage of images to use as a test set.\"\"\")\ntf.app.flags.DEFINE_integer(\n 'validation_percentage', 10,\n \"\"\"What percentage of images to use as a validation set.\"\"\")\ntf.app.flags.DEFINE_integer('eval_step_interval', 10,\n \"\"\"How often to evaluate the training results.\"\"\")\ntf.app.flags.DEFINE_integer('train_batch_size', 100,\n \"\"\"How many images to train on at a time.\"\"\")\ntf.app.flags.DEFINE_integer('test_batch_size', 500,\n \"\"\"How many images to test on at a time. This\"\"\"\n \"\"\" test set is only used infrequently to verify\"\"\"\n \"\"\" the overall accuracy of the model.\"\"\")\ntf.app.flags.DEFINE_integer(\n 'validation_batch_size', 100,\n \"\"\"How many images to use in an evaluation batch. 
This validation set is\"\"\"\n \"\"\" used much more often than the test set, and is an early indicator of\"\"\"\n \"\"\" how accurate the model is during training.\"\"\")\n\n# File-system cache locations.\ntf.app.flags.DEFINE_string('model_dir', '/tmp/imagenet',\n \"\"\"Path to classify_image_graph_def.pb, \"\"\"\n \"\"\"imagenet_synset_to_human_label_map.txt, and \"\"\"\n \"\"\"imagenet_2012_challenge_label_map_proto.pbtxt.\"\"\")\ntf.app.flags.DEFINE_string(\n 'bottleneck_dir', '/tmp/bottleneck',\n \"\"\"Path to cache bottleneck layer values as files.\"\"\")\ntf.app.flags.DEFINE_string('final_tensor_name', 'final_result',\n \"\"\"The name of the output classification layer in\"\"\"\n \"\"\" the retrained graph.\"\"\")\n\n# Controls the distortions used during training.\ntf.app.flags.DEFINE_boolean(\n 'flip_left_right', False,\n \"\"\"Whether to randomly flip half of the training images horizontally.\"\"\")\ntf.app.flags.DEFINE_integer(\n 'random_crop', 0,\n \"\"\"A percentage determining how much of a margin to randomly crop off the\"\"\"\n \"\"\" training images.\"\"\")\ntf.app.flags.DEFINE_integer(\n 'random_scale', 0,\n \"\"\"A percentage determining how much to randomly scale up the size of the\"\"\"\n \"\"\" training images by.\"\"\")\ntf.app.flags.DEFINE_integer(\n 'random_brightness', 0,\n \"\"\"A percentage determining how much to randomly multiply the training\"\"\"\n \"\"\" image input pixels up or down by.\"\"\")\n\n# These are all parameters that are tied to the particular model architecture\n# we're using for Inception v3. These include things like tensor names and their\n# sizes. If you want to adapt this script to work with another model, you will\n# need to update these to reflect the values in the network you're using.\n# pylint: disable=line-too-long\nDATA_URL = 'http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz'\n# pylint: enable=line-too-long\nBOTTLENECK_TENSOR_NAME = 'pool_3/_reshape:0'\nBOTTLENECK_TENSOR_SIZE = 2048\nMODEL_INPUT_WIDTH = 299\nMODEL_INPUT_HEIGHT = 299\nMODEL_INPUT_DEPTH = 3\nJPEG_DATA_TENSOR_NAME = 'DecodeJpeg/contents:0'\nRESIZED_INPUT_TENSOR_NAME = 'ResizeBilinear:0'\n\n\ndef create_image_lists(image_dir, testing_percentage, validation_percentage):\n \"\"\"Builds a list of training images from the file system.\n\n Analyzes the sub folders in the image directory, splits them into stable\n training, testing, and validation sets, and returns a data structure\n describing the lists of images for each label and their paths.\n\n Args:\n image_dir: String path to a folder containing subfolders of images.\n testing_percentage: Integer percentage of the images to reserve for tests.\n validation_percentage: Integer percentage of images reserved for validation.\n\n Returns:\n A dictionary containing an entry for each label subfolder, with images split\n into training, testing, and validation sets within each label.\n \"\"\"\n if not gfile.Exists(image_dir):\n print(\"Image directory '\" + image_dir + \"' not found.\")\n return None\n result = {}\n sub_dirs = [x[0] for x in os.walk(image_dir)]\n # The root directory comes first, so skip it.\n is_root_dir = True\n for sub_dir in sub_dirs:\n if is_root_dir:\n is_root_dir = False\n continue\n extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']\n file_list = []\n dir_name = os.path.basename(sub_dir)\n if dir_name == image_dir:\n continue\n print(\"Looking for images in '\" + dir_name + \"'\")\n for extension in extensions:\n file_glob = os.path.join(image_dir, dir_name, '*.' 
+ extension)\n file_list.extend(glob.glob(file_glob))\n if not file_list:\n print('No files found')\n continue\n if len(file_list) < 20:\n print('WARNING: Folder has less than 20 images, which may cause issues.')\n label_name = re.sub(r'[^a-z0-9]+', ' ', dir_name.lower())\n training_images = []\n testing_images = []\n validation_images = []\n for file_name in file_list:\n base_name = os.path.basename(file_name)\n # We want to ignore anything after '_nohash_' in the file name when\n # deciding which set to put an image in, the data set creator has a way of\n # grouping photos that are close variations of each other. For example\n # this is used in the plant disease data set to group multiple pictures of\n # the same leaf.\n hash_name = re.sub(r'_nohash_.*$', '', file_name)\n # This looks a bit magical, but we need to decide whether this file should\n # go into the training, testing, or validation sets, and we want to keep\n # existing files in the same set even if more files are subsequently\n # added.\n # To do that, we need a stable way of deciding based on just the file name\n # itself, so we do a hash of that and then use that to generate a\n # probability value that we use to assign it.\n hash_name_hashed = hashlib.sha1(hash_name.encode('utf-8')).hexdigest()\n percentage_hash = (int(hash_name_hashed, 16) % (65536)) * (100 / 65535.0)\n if percentage_hash < validation_percentage:\n validation_images.append(base_name)\n elif percentage_hash < (testing_percentage + validation_percentage):\n testing_images.append(base_name)\n else:\n training_images.append(base_name)\n result[label_name] = {\n 'dir': dir_name,\n 'training': training_images,\n 'testing': testing_images,\n 'validation': validation_images,\n }\n return result\n\n\ndef get_image_path(image_lists, label_name, index, image_dir, category):\n \"\"\"\"Returns a path to an image for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Int offset of the image we want. This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n\n \"\"\"\n if label_name not in image_lists:\n tf.logging.fatal('Label does not exist %s.', label_name)\n label_lists = image_lists[label_name]\n if category not in label_lists:\n tf.logging.fatal('Category does not exist %s.', category)\n category_list = label_lists[category]\n if not category_list:\n tf.logging.fatal('Category has no images - %s.', category)\n mod_index = index % len(category_list)\n base_name = category_list[mod_index]\n sub_dir = label_lists['dir']\n full_path = os.path.join(image_dir, sub_dir, base_name)\n return full_path\n\n\ndef get_bottleneck_path(image_lists, label_name, index, bottleneck_dir,\n category):\n \"\"\"\"Returns a path to a bottleneck file for a label at the given index.\n\n Args:\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. 
This will be moduloed by the\n available number of images for the label, so it can be arbitrarily large.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n category: Name string of set to pull images from - training, testing, or\n validation.\n\n Returns:\n File system path string to an image that meets the requested parameters.\n \"\"\"\n return get_image_path(image_lists, label_name, index, bottleneck_dir,\n category) + '.txt'\n\n\ndef create_inception_graph():\n \"\"\"\"Creates a graph from saved GraphDef file and returns a Graph object.\n\n Returns:\n Graph holding the trained Inception network, and various tensors we'll be\n manipulating.\n \"\"\"\n with tf.Session() as sess:\n model_filename = os.path.join(\n FLAGS.model_dir, 'classify_image_graph_def.pb')\n with gfile.FastGFile(model_filename, 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n bottleneck_tensor, jpeg_data_tensor, resized_input_tensor = (\n tf.import_graph_def(graph_def, name='', return_elements=[\n BOTTLENECK_TENSOR_NAME, JPEG_DATA_TENSOR_NAME,\n RESIZED_INPUT_TENSOR_NAME]))\n return sess.graph, bottleneck_tensor, jpeg_data_tensor, resized_input_tensor\n\n\ndef run_bottleneck_on_image(sess, image_data, image_data_tensor,\n bottleneck_tensor):\n \"\"\"Runs inference on an image to extract the 'bottleneck' summary layer.\n\n Args:\n sess: Current active TensorFlow Session.\n image_data: Numpy array of image data.\n image_data_tensor: Input data layer in the graph.\n bottleneck_tensor: Layer before the final softmax.\n\n Returns:\n Numpy array of bottleneck values.\n \"\"\"\n bottleneck_values = sess.run(\n bottleneck_tensor,\n {image_data_tensor: image_data})\n bottleneck_values = np.squeeze(bottleneck_values)\n return bottleneck_values\n\n\ndef maybe_download_and_extract():\n \"\"\"Download and extract model tar file.\n\n If the pretrained model we're using doesn't already exist, this function\n downloads it from the TensorFlow.org website and unpacks it into a directory.\n \"\"\"\n dest_directory = FLAGS.model_dir\n if not os.path.exists(dest_directory):\n os.makedirs(dest_directory)\n filename = DATA_URL.split('/')[-1]\n filepath = os.path.join(dest_directory, filename)\n if not os.path.exists(filepath):\n\n def _progress(count, block_size, total_size):\n sys.stdout.write('\\r>> Downloading %s %.1f%%' %\n (filename,\n float(count * block_size) / float(total_size) * 100.0))\n sys.stdout.flush()\n\n filepath, _ = urllib.request.urlretrieve(DATA_URL,\n filepath,\n _progress)\n print()\n statinfo = os.stat(filepath)\n print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')\n tarfile.open(filepath, 'r:gz').extractall(dest_directory)\n\n\ndef ensure_dir_exists(dir_name):\n \"\"\"Makes sure the folder exists on disk.\n\n Args:\n dir_name: Path string to the folder we want to create.\n \"\"\"\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n\n\ndef write_list_of_floats_to_file(list_of_floats , file_path):\n \"\"\"Writes a given list of floats to a binary file.\n\n Args:\n list_of_floats: List of floats we want to write to a file.\n file_path: Path to a file where list of floats will be stored.\n\n \"\"\"\n\n s = struct.pack('d' * BOTTLENECK_TENSOR_SIZE, *list_of_floats)\n with open(file_path, 'wb') as f:\n f.write(s)\n\n\ndef read_list_of_floats_from_file(file_path):\n \"\"\"Reads list of floats from a given file.\n\n Args:\n file_path: Path to a file where list of floats was stored.\n Returns:\n Array of bottleneck values (list of 
floats).\n\n \"\"\"\n\n with open(file_path, 'rb') as f:\n s = struct.unpack('d' * BOTTLENECK_TENSOR_SIZE, f.read())\n return list(s)\n\n\nbottleneck_path_2_bottleneck_values = {}\n\n\ndef get_or_create_bottleneck(sess, image_lists, label_name, index, image_dir,\n category, bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor):\n \"\"\"Retrieves or calculates bottleneck values for an image.\n\n If a cached version of the bottleneck data exists on-disk, return that,\n otherwise calculate the data and save it to disk for future use.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n label_name: Label string we want to get an image for.\n index: Integer offset of the image we want. This will be modulo-ed by the\n available number of images for the label, so it can be arbitrarily large.\n image_dir: Root folder string of the subfolders containing the training\n images.\n category: Name string of which set to pull images from - training, testing,\n or validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: The tensor to feed loaded jpeg data into.\n bottleneck_tensor: The output tensor for the bottleneck values.\n\n Returns:\n Numpy array of values produced by the bottleneck layer for the image.\n \"\"\"\n label_lists = image_lists[label_name]\n sub_dir = label_lists['dir']\n sub_dir_path = os.path.join(bottleneck_dir, sub_dir)\n ensure_dir_exists(sub_dir_path)\n bottleneck_path = get_bottleneck_path(image_lists, label_name, index,\n bottleneck_dir, category)\n if not os.path.exists(bottleneck_path):\n image_path = get_image_path(image_lists, label_name, index, image_dir,\n category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n image_data = gfile.FastGFile(image_path, 'rb').read()\n bottleneck_values = run_bottleneck_on_image(sess, image_data,\n jpeg_data_tensor,\n bottleneck_tensor)\n bottleneck_string = ','.join(str(x) for x in bottleneck_values)\n with open(bottleneck_path, 'w') as bottleneck_file:\n bottleneck_file.write(bottleneck_string)\n\n with open(bottleneck_path, 'r') as bottleneck_file:\n bottleneck_string = bottleneck_file.read()\n bottleneck_values = [float(x) for x in bottleneck_string.split(',')]\n return bottleneck_values\n\n\ndef cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor):\n \"\"\"Ensures all the training, testing, and validation bottlenecks are cached.\n\n Because we're likely to read the same image multiple times (if there are no\n distortions applied during training) it can speed things up a lot if we\n calculate the bottleneck layer values once for each image during\n preprocessing, and then just read those cached values repeatedly during\n training. 
Here we go through all the images we've found, calculate those\n values, and save them off.\n\n Args:\n sess: The current active TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n image_dir: Root folder string of the subfolders containing the training\n images.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n jpeg_data_tensor: Input tensor for jpeg data from file.\n bottleneck_tensor: The penultimate output layer of the graph.\n\n Returns:\n Nothing.\n \"\"\"\n how_many_bottlenecks = 0\n ensure_dir_exists(bottleneck_dir)\n for label_name, label_lists in image_lists.items():\n for category in ['training', 'testing', 'validation']:\n category_list = label_lists[category]\n print('Creating bottlenecks.')\n for index, unused_base_name in enumerate(category_list):\n get_or_create_bottleneck(sess, image_lists, label_name, index,\n image_dir, category, bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor)\n how_many_bottlenecks += 1\n if how_many_bottlenecks % 100 == 0:\n print(str(how_many_bottlenecks) + ' bottleneck files created.')\n\n\ndef get_random_cached_bottlenecks(sess, image_lists, how_many, category,\n bottleneck_dir, image_dir, jpeg_data_tensor,\n bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for cached images.\n\n If no distortions are being applied, this function can retrieve the cached\n bottleneck values directly from disk for images. It picks a random set of\n images from the specified category.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: The number of bottleneck values to return.\n category: Name string of which set to pull from - training, testing, or\n validation.\n bottleneck_dir: Folder string holding cached files of bottleneck values.\n image_dir: Root folder string of the subfolders containing the training\n images.\n jpeg_data_tensor: The layer to feed jpeg image data into.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays and their corresponding ground truths.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(65536)\n bottleneck = get_or_create_bottleneck(sess, image_lists, label_name,\n image_index, image_dir, category,\n bottleneck_dir, jpeg_data_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n return bottlenecks, ground_truths\n\n\ndef get_random_distorted_bottlenecks(\n sess, image_lists, how_many, category, image_dir, input_jpeg_tensor,\n distorted_image, resized_input_tensor, bottleneck_tensor):\n \"\"\"Retrieves bottleneck values for training images, after distortions.\n\n If we're training with distortions like crops, scales, or flips, we have to\n recalculate the full model for every image, and so we can't use cached\n bottleneck values. 
Instead we find random images for the requested category,\n run them through the distortion graph, and then the full graph to get the\n bottleneck results for each.\n\n Args:\n sess: Current TensorFlow Session.\n image_lists: Dictionary of training images for each label.\n how_many: The integer number of bottleneck values to return.\n category: Name string of which set of images to fetch - training, testing,\n or validation.\n image_dir: Root folder string of the subfolders containing the training\n images.\n input_jpeg_tensor: The input layer we feed the image data to.\n distorted_image: The output node of the distortion graph.\n resized_input_tensor: The input node of the recognition graph.\n bottleneck_tensor: The bottleneck output layer of the CNN graph.\n\n Returns:\n List of bottleneck arrays and their corresponding ground truths.\n \"\"\"\n class_count = len(image_lists.keys())\n bottlenecks = []\n ground_truths = []\n for unused_i in range(how_many):\n label_index = random.randrange(class_count)\n label_name = list(image_lists.keys())[label_index]\n image_index = random.randrange(65536)\n image_path = get_image_path(image_lists, label_name, image_index, image_dir,\n category)\n if not gfile.Exists(image_path):\n tf.logging.fatal('File does not exist %s', image_path)\n jpeg_data = gfile.FastGFile(image_path, 'rb').read()\n # Note that we materialize the distorted_image_data as a numpy array before\n # sending running inference on the image. This involves 2 memory copies and\n # might be optimized in other implementations.\n distorted_image_data = sess.run(distorted_image,\n {input_jpeg_tensor: jpeg_data})\n bottleneck = run_bottleneck_on_image(sess, distorted_image_data,\n resized_input_tensor,\n bottleneck_tensor)\n ground_truth = np.zeros(class_count, dtype=np.float32)\n ground_truth[label_index] = 1.0\n bottlenecks.append(bottleneck)\n ground_truths.append(ground_truth)\n return bottlenecks, ground_truths\n\n\ndef should_distort_images(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Whether any distortions are enabled, from the input flags.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n\n Returns:\n Boolean value indicating whether any distortions should be applied.\n \"\"\"\n return (flip_left_right or (random_crop != 0) or (random_scale != 0) or\n (random_brightness != 0))\n\n\ndef add_input_distortions(flip_left_right, random_crop, random_scale,\n random_brightness):\n \"\"\"Creates the operations to apply the specified distortions.\n\n During training it can help to improve the results if we run the images\n through simple distortions like crops, scales, and flips. These reflect the\n kind of variations we expect in the real world, and so can help train the\n model to cope with natural data more effectively. Here we take the supplied\n parameters and construct a network of operations to apply them to an image.\n\n Cropping\n ~~~~~~~~\n\n Cropping is done by placing a bounding box at a random position in the full\n image. The cropping parameter controls the size of that box relative to the\n input image. If it's zero, then the box is the same size as the input and no\n cropping is performed. If the value is 50%, then the crop box will be half the\n width and height of the input. 
In a diagram it looks like this:\n\n < width >\n +---------------------+\n | |\n | width - crop% |\n | < > |\n | +------+ |\n | | | |\n | | | |\n | | | |\n | +------+ |\n | |\n | |\n +---------------------+\n\n Scaling\n ~~~~~~~\n\n Scaling is a lot like cropping, except that the bounding box is always\n centered and its size varies randomly within the given range. For example if\n the scale percentage is zero, then the bounding box is the same size as the\n input and no scaling is applied. If it's 50%, then the bounding box will be in\n a random range between half the width and height and full size.\n\n Args:\n flip_left_right: Boolean whether to randomly mirror images horizontally.\n random_crop: Integer percentage setting the total margin used around the\n crop box.\n random_scale: Integer percentage of how much to vary the scale by.\n random_brightness: Integer range to randomly multiply the pixel values by.\n graph.\n\n Returns:\n The jpeg input layer and the distorted result tensor.\n \"\"\"\n\n jpeg_data = tf.placeholder(tf.string, name='DistortJPGInput')\n decoded_image = tf.image.decode_jpeg(jpeg_data, channels=MODEL_INPUT_DEPTH)\n decoded_image_as_float = tf.cast(decoded_image, dtype=tf.float32)\n decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)\n margin_scale = 1.0 + (random_crop / 100.0)\n resize_scale = 1.0 + (random_scale / 100.0)\n margin_scale_value = tf.constant(margin_scale)\n resize_scale_value = tf.random_uniform(tensor_shape.scalar(),\n minval=1.0,\n maxval=resize_scale)\n scale_value = tf.mul(margin_scale_value, resize_scale_value)\n precrop_width = tf.mul(scale_value, MODEL_INPUT_WIDTH)\n precrop_height = tf.mul(scale_value, MODEL_INPUT_HEIGHT)\n precrop_shape = tf.pack([precrop_height, precrop_width])\n precrop_shape_as_int = tf.cast(precrop_shape, dtype=tf.int32)\n precropped_image = tf.image.resize_bilinear(decoded_image_4d,\n precrop_shape_as_int)\n precropped_image_3d = tf.squeeze(precropped_image, squeeze_dims=[0])\n cropped_image = tf.random_crop(precropped_image_3d,\n [MODEL_INPUT_HEIGHT, MODEL_INPUT_WIDTH,\n MODEL_INPUT_DEPTH])\n if flip_left_right:\n flipped_image = tf.image.random_flip_left_right(cropped_image)\n else:\n flipped_image = cropped_image\n brightness_min = 1.0 - (random_brightness / 100.0)\n brightness_max = 1.0 + (random_brightness / 100.0)\n brightness_value = tf.random_uniform(tensor_shape.scalar(),\n minval=brightness_min,\n maxval=brightness_max)\n brightened_image = tf.mul(flipped_image, brightness_value)\n distort_result = tf.expand_dims(brightened_image, 0, name='DistortResult')\n return jpeg_data, distort_result\n\n\ndef variable_summaries(var, name):\n \"\"\"Attach a lot of summaries to a Tensor (for TensorBoard visualization).\"\"\"\n with tf.name_scope('summaries'):\n mean = tf.reduce_mean(var)\n tf.scalar_summary('mean/' + name, mean)\n with tf.name_scope('stddev'):\n stddev = tf.sqrt(tf.reduce_sum(tf.square(var - mean)))\n tf.scalar_summary('sttdev/' + name, stddev)\n tf.scalar_summary('max/' + name, tf.reduce_max(var))\n tf.scalar_summary('min/' + name, tf.reduce_min(var))\n tf.histogram_summary(name, var)\n\n\ndef add_final_training_ops(class_count, final_tensor_name, bottleneck_tensor):\n \"\"\"Adds a new softmax and fully-connected layer for training.\n\n We need to retrain the top layer to identify our new classes, so this function\n adds the right operations to the graph, along with some variables to hold the\n weights, and then sets up all the gradients for the backward pass.\n\n The set up for the softmax and 
fully-connected layers is based on:\n https://tensorflow.org/versions/master/tutorials/mnist/beginners/index.html\n\n Args:\n class_count: Integer of how many categories of things we're trying to\n recognize.\n final_tensor_name: Name string for the new final node that produces results.\n bottleneck_tensor: The output of the main CNN graph.\n\n Returns:\n The tensors for the training and cross entropy results, and tensors for the\n bottleneck input and ground truth input.\n \"\"\"\n with tf.name_scope('input'):\n bottleneck_input = tf.placeholder_with_default(\n bottleneck_tensor, shape=[None, BOTTLENECK_TENSOR_SIZE],\n name='BottleneckInputPlaceholder')\n\n ground_truth_input = tf.placeholder(tf.float32,\n [None, class_count],\n name='GroundTruthInput')\n\n # Organizing the following ops as `final_training_ops` so they're easier\n # to see in TensorBoard\n layer_name = 'final_training_ops'\n with tf.name_scope(layer_name):\n with tf.name_scope('weights'):\n layer_weights = tf.Variable(tf.truncated_normal([BOTTLENECK_TENSOR_SIZE, class_count], stddev=0.001), name='final_weights')\n variable_summaries(layer_weights, layer_name + '/weights')\n with tf.name_scope('biases'):\n layer_biases = tf.Variable(tf.zeros([class_count]), name='final_biases')\n variable_summaries(layer_biases, layer_name + '/biases')\n with tf.name_scope('Wx_plus_b'):\n logits = tf.matmul(bottleneck_input, layer_weights) + layer_biases\n tf.histogram_summary(layer_name + '/pre_activations', logits)\n\n final_tensor = tf.nn.softmax(logits, name=final_tensor_name)\n tf.histogram_summary(final_tensor_name + '/activations', final_tensor)\n\n with tf.name_scope('cross_entropy'):\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(\n logits, ground_truth_input)\n with tf.name_scope('total'):\n cross_entropy_mean = tf.reduce_mean(cross_entropy)\n tf.scalar_summary('cross entropy', cross_entropy_mean)\n\n with tf.name_scope('train'):\n train_step = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(\n cross_entropy_mean)\n\n return (train_step, cross_entropy_mean, bottleneck_input, ground_truth_input,\n final_tensor)\n\n\ndef add_evaluation_step(result_tensor, ground_truth_tensor):\n \"\"\"Inserts the operations we need to evaluate the accuracy of our results.\n\n Args:\n result_tensor: The new final node that produces results.\n ground_truth_tensor: The node we feed ground truth data\n into.\n\n Returns:\n Nothing.\n \"\"\"\n with tf.name_scope('accuracy'):\n with tf.name_scope('correct_prediction'):\n correct_prediction = tf.equal(tf.argmax(result_tensor, 1), \\\n tf.argmax(ground_truth_tensor, 1))\n with tf.name_scope('accuracy'):\n evaluation_step = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n tf.scalar_summary('accuracy', evaluation_step)\n return evaluation_step\n\n\ndef main(_):\n # Setup the directory we'll write summaries to for TensorBoard\n if tf.gfile.Exists(FLAGS.summaries_dir):\n tf.gfile.DeleteRecursively(FLAGS.summaries_dir)\n tf.gfile.MakeDirs(FLAGS.summaries_dir)\n\n # Set up the pre-trained graph.\n maybe_download_and_extract()\n graph, bottleneck_tensor, jpeg_data_tensor, resized_image_tensor = (\n create_inception_graph())\n\n # Look at the folder structure, and create lists of all the images.\n image_lists = create_image_lists(FLAGS.image_dir, FLAGS.testing_percentage,\n FLAGS.validation_percentage)\n class_count = len(image_lists.keys())\n if class_count == 0:\n print('No valid folders of images found at ' + FLAGS.image_dir)\n return -1\n if class_count == 1:\n print('Only 
one valid folder of images found at ' + FLAGS.image_dir +\n ' - multiple classes are needed for classification.')\n return -1\n\n # See if the command-line flags mean we're applying any distortions.\n do_distort_images = should_distort_images(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n sess = tf.Session()\n\n if do_distort_images:\n # We will be applying distortions, so setup the operations we'll need.\n distorted_jpeg_data_tensor, distorted_image_tensor = add_input_distortions(\n FLAGS.flip_left_right, FLAGS.random_crop, FLAGS.random_scale,\n FLAGS.random_brightness)\n else:\n # We'll make sure we've calculated the 'bottleneck' image summaries and\n # cached them on disk.\n cache_bottlenecks(sess, image_lists, FLAGS.image_dir, FLAGS.bottleneck_dir,\n jpeg_data_tensor, bottleneck_tensor)\n\n # Add the new layer that we'll be training.\n (train_step, cross_entropy, bottleneck_input, ground_truth_input,\n final_tensor) = add_final_training_ops(len(image_lists.keys()),\n FLAGS.final_tensor_name,\n bottleneck_tensor)\n\n # Create the operations we need to evaluate the accuracy of our new layer.\n evaluation_step = add_evaluation_step(final_tensor, ground_truth_input)\n\n # Merge all the summaries and write them out to /tmp/retrain_logs (by default)\n merged = tf.merge_all_summaries()\n train_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/train',\n sess.graph)\n validation_writer = tf.train.SummaryWriter(FLAGS.summaries_dir + '/validation')\n\n # Set up all our weights to their initial default values.\n init = tf.initialize_all_variables()\n sess.run(init)\n\n # Run the training for as many cycles as requested on the command line.\n for i in range(FLAGS.how_many_training_steps):\n # Get a batch of input bottleneck values, either calculated fresh every time\n # with distortions applied, or from the cache stored on disk.\n if do_distort_images:\n train_bottlenecks, train_ground_truth = get_random_distorted_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.image_dir, distorted_jpeg_data_tensor,\n distorted_image_tensor, resized_image_tensor, bottleneck_tensor)\n else:\n train_bottlenecks, train_ground_truth = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.train_batch_size, 'training',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor)\n # Feed the bottlenecks and ground truth into the graph, and run a training\n # step. 
Capture training summaries for TensorBoard with the `merged` op.\n train_summary, _ = sess.run([merged, train_step],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n train_writer.add_summary(train_summary, i)\n\n # Every so often, print out how well the graph is training.\n is_last_step = (i + 1 == FLAGS.how_many_training_steps)\n if (i % FLAGS.eval_step_interval) == 0 or is_last_step:\n train_accuracy, cross_entropy_value = sess.run(\n [evaluation_step, cross_entropy],\n feed_dict={bottleneck_input: train_bottlenecks,\n ground_truth_input: train_ground_truth})\n print('%s: Step %d: Train accuracy = %.1f%%' % (datetime.now(), i,\n train_accuracy * 100))\n print('%s: Step %d: Cross entropy = %f' % (datetime.now(), i,\n cross_entropy_value))\n validation_bottlenecks, validation_ground_truth = (\n get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.validation_batch_size, 'validation',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor))\n # Run a validation step and capture training summaries for TensorBoard\n # with the `merged` op.\n validation_summary, validation_accuracy = sess.run(\n [merged, evaluation_step],\n feed_dict={bottleneck_input: validation_bottlenecks,\n ground_truth_input: validation_ground_truth})\n validation_writer.add_summary(validation_summary, i)\n print('%s: Step %d: Validation accuracy = %.1f%%' %\n (datetime.now(), i, validation_accuracy * 100))\n\n # We've completed all our training, so run a final test evaluation on\n # some new images we haven't used before.\n test_bottlenecks, test_ground_truth = get_random_cached_bottlenecks(\n sess, image_lists, FLAGS.test_batch_size, 'testing',\n FLAGS.bottleneck_dir, FLAGS.image_dir, jpeg_data_tensor,\n bottleneck_tensor)\n test_accuracy = sess.run(\n evaluation_step,\n feed_dict={bottleneck_input: test_bottlenecks,\n ground_truth_input: test_ground_truth})\n print('Final test accuracy = %.1f%%' % (test_accuracy * 100))\n\n # Write out the trained graph and labels with the weights stored as constants.\n output_graph_def = graph_util.convert_variables_to_constants(\n sess, graph.as_graph_def(), [FLAGS.final_tensor_name])\n with gfile.FastGFile(FLAGS.output_graph, 'wb') as f:\n f.write(output_graph_def.SerializeToString())\n with gfile.FastGFile(FLAGS.output_labels, 'w') as f:\n f.write('\\n'.join(image_lists.keys()) + '\\n')\n\n\nif __name__ == '__main__':\n tf.app.run()\n"
] | [
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.gfile.DeleteRecursively",
"tensorflow.zeros",
"tensorflow.gfile.Exists",
"numpy.squeeze",
"tensorflow.cast",
"tensorflow.python.platform.gfile.Exists",
"tensorflow.gfile.MakeDirs",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.app.flags.DEFINE_boolean",
"tensorflow.pack",
"tensorflow.import_graph_def",
"tensorflow.image.random_flip_left_right",
"tensorflow.merge_all_summaries",
"tensorflow.placeholder_with_default",
"tensorflow.app.flags.DEFINE_integer",
"tensorflow.squeeze",
"tensorflow.initialize_all_variables",
"tensorflow.name_scope",
"tensorflow.Session",
"tensorflow.square",
"tensorflow.argmax",
"numpy.zeros",
"tensorflow.logging.fatal",
"tensorflow.image.decode_jpeg",
"tensorflow.app.run",
"tensorflow.image.resize_bilinear",
"tensorflow.matmul",
"tensorflow.truncated_normal",
"tensorflow.placeholder",
"tensorflow.train.GradientDescentOptimizer",
"tensorflow.python.platform.gfile.FastGFile",
"tensorflow.reduce_max",
"tensorflow.nn.softmax",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.scalar_summary",
"tensorflow.expand_dims",
"tensorflow.mul",
"tensorflow.random_crop",
"tensorflow.train.SummaryWriter",
"tensorflow.reduce_min",
"tensorflow.app.flags.DEFINE_float",
"tensorflow.histogram_summary",
"tensorflow.GraphDef"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
cgnik/euler | [
"fe70459a0e0d0272980300a4782872f2e545fea5"
] | [
"src/py/util/cartesian.py"
] | [
"import itertools as it\nfrom operator import mul\n\nimport numpy as np\nfrom functools import reduce\n\nfrom util.factoring import factors\n\n\ndef cartesian(boundary, x, *args, reducer=mul):\n products = x\n last_products = []\n\n def gen():\n yield x\n if args and len(args):\n for z in args:\n yield z\n else:\n while True: yield products\n return None\n\n for products in gen():\n products = np.unique([reduce(reducer, x) for x in it.product(x, products)])\n products = list(filter(boundary, products))\n products.sort()\n if products == last_products:\n break\n last_products = products\n return np.array(products)\n\n\ndef cartesian_factors(n, facs):\n cart = cartesian(lambda x: n % x == 0, facs)\n return facs, cart[np.where(n % cart == 0)]\n\n\ndef cartesian_product(nums, limit=None):\n product_pairs = it.product(np.array(nums), np.array(nums))\n products = [a[0] * a[1] for a in product_pairs]\n if limit is not None:\n divisors = list(filter(lambda x: limit % x == 0, products))\n else:\n divisors = products\n return divisors\n\n\ndef cartesian_loop(n, count):\n facs = list(factors(n))\n for c in range(0, count):\n facs = list(set(cartesian_product(facs, n)))\n return facs\n\n\ndef all_factors(num, include_self=False):\n answer = cartesian_loop(num, 8)\n if not include_self:\n answer.remove(num)\n return answer\n"
] | [
[
"numpy.array",
"numpy.where"
]
] | [
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |