code (string, 3-1.05M chars) | repo_name (string, 5-104 chars) | path (string, 4-251 chars) | language (1 class) | license (15 classes) | size (int64, 3-1.05M)
---|---|---|---|---|---|
from .base_endpoint import BaseEndpoint
class ContactList(BaseEndpoint):
"""
Class representation of the ContactList endpoint.
Examples:
If you want to use contact lists' methods through an instance of the Emarsys
client:
>>> from pymarsys import SyncConnection, Emarsys
>>> connection = SyncConnection('username', 'password')
>>> client = Emarsys(connection)
>>> client.contact_list
<pymarsys.contact_list.ContactList at 0x1050f7048>
If you want to use contact lists' methods through an instance of the ContactList
endpoint class:
>>> from pymarsys import SyncConnection
>>> from pymarsys.contact_list import ContactList
>>> connection = SyncConnection('username', 'password')
>>> contact_list = ContactList(connection)
>>> contact_list
<pymarsys.contact_list.ContactList at 0x10333ec88>
"""
def __init__(self, connection, endpoint="api/v2/contactlist/"):
super().__init__(connection, endpoint)
def create(self, name, key_id=3, with_contacts_ids=None, description=None):
"""
Create a contact list.
https://dev.emarsys.com/v2/contact-lists/create-a-contact-list
:param name: name of the list to create
:param key_id: Key which identifies the contact. This can be a field
id, id, uid or eid. If left empty, the email address (field ID 3) will
be used by default.
:param with_contacts_ids: List of contact identifiers (values of key_id) to add to the list, e.g. ['[email protected]'].
:param description: Additional information about the contact list.
:return: The API response payload.
Examples:
If you want to create a list named test_list and assign [email protected] to it:
>>> client.contact_list.create("test_list", with_contacts_ids=['[email protected]'])
{'data': {'id': 123}, 'replyCode': 0, 'replyText': 'OK'}
"""
payload = {
"key_id": key_id,
"name": name,
"description": description,
"external_ids": with_contacts_ids,
}
return self.connection.make_call("POST", self.endpoint, payload=payload)
def add_contacts(self, list_id, contacts_ids, key_id=3):
"""
Add multiple contacts to an existing list.
https://dev.emarsys.com/v2/contact-lists/add-contacts-to-a-contact-list
:param list_id: Identifier of the list you want to add contacts to.
:param contacts_ids: List of contact identifiers you want to add to the list.
:param key_id: Key which identifies the contact. This can be a field
id, id, uid or eid. If left empty, the email address (field ID 3) will
be used by default.
:return: The API response payload.
Examples:
If you want to add [email protected] to the list with ID 123:
>>> client.contact_list.add_contacts(123, ['[email protected]'])
{'data': {'inserted_contacts': 123}, 'replyCode': 0, 'replyText': 'OK'}
"""
payload = {"key_id": key_id, "external_ids": contacts_ids}
endpoint = "{}{}/add/".format(self.endpoint, list_id)
return self.connection.make_call("POST", endpoint, payload=payload)
| transcovo/pymarsys | pymarsys/contact_list.py | Python | apache-2.0 | 3,259 |
from listenbrainz_spark.tests import SparkTestCase
from listenbrainz_spark import utils, path, config
import listenbrainz_spark.utils.mapping as mapping_utils
from pyspark.sql import Row
class MappingUtilsTestCase(SparkTestCase):
mapping_path = path.MBID_MSID_MAPPING
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.upload_test_mapping_to_hdfs(cls.mapping_path)
def test_get_unique_rows_from_mapping(self):
df = utils.read_files_from_HDFS(self.mapping_path)
mapping_df = mapping_utils.get_unique_rows_from_mapping(df)
self.assertEqual(mapping_df.count(), 3)
cols = [
'mb_artist_credit_id',
'mb_artist_credit_mbids',
'mb_artist_credit_name',
'mb_recording_mbid',
'mb_recording_name',
'mb_release_mbid',
'mb_release_name',
'msb_artist_credit_name_matchable',
'msb_recording_name_matchable'
]
self.assertEqual(sorted(mapping_df.columns), sorted(cols))
def test_unaccent_artist_and_track_name(self):
df = utils.create_dataframe(
Row(
artist_name='égè,câ,î or ô)tñü or ï(ç)',
track_name='égè,câ,î or ô)tñü lalaor ïïï(ç)'
),
schema=None
)
res_df = mapping_utils.unaccent_artist_and_track_name(df)
self.assertEqual(res_df.collect()[0].unaccented_artist_name, 'ege,ca,i or o)tnu or i(c)')
self.assertEqual(res_df.collect()[0].unaccented_track_name, 'ege,ca,i or o)tnu lalaor iii(c)')
def test_convert_text_fields_to_matchable(self):
df = utils.create_dataframe(
Row(
artist_name='égè,câ,î or ô)tñü or ï(ç) !"#$%&\'()*+, L ABD don''t-./:;<=>?@[]^_`{|}~',
track_name='égè,câ,î or ô)tñü lalaor ïïï(ç)!"#$%&\'()*+, L ABD don''t lie-./:;<=>?@[]^_`{|}~'
),
schema=None
)
res_df = mapping_utils.convert_text_fields_to_matchable(df)
res_df.show()
self.assertEqual(res_df.collect()[0].artist_name_matchable, 'egecaiorotnuoriclabddont')
self.assertEqual(res_df.collect()[0].track_name_matchable, 'egecaiorotnulalaoriiiclabddontlie')
| Freso/listenbrainz-server | listenbrainz_spark/utils/tests/test_mapping.py | Python | gpl-2.0 | 2,305 |
from django.core import validators
from django.db import models
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _
class FlatPage(models.Model):
url = models.CharField(_('URL'), max_length=100, validator_list=[validators.isAlphaNumericURL], db_index=True,
help_text=_("Example: '/about/contact/'. Make sure to have leading and trailing slashes."))
title = models.CharField(_('title'), max_length=200)
content = models.TextField(_('content'))
enable_comments = models.BooleanField(_('enable comments'))
template_name = models.CharField(_('template name'), max_length=70, blank=True,
help_text=_("Example: 'flatpages/contact_page.html'. If this isn't provided, the system will use 'flatpages/default.html'."))
registration_required = models.BooleanField(_('registration required'), help_text=_("If this is checked, only logged-in users will be able to view the page."))
sites = models.ManyToManyField(Site)
class Meta:
db_table = 'django_flatpage'
verbose_name = _('flat page')
verbose_name_plural = _('flat pages')
ordering = ('url',)
class Admin:
fields = (
(None, {'fields': ('url', 'title', 'content', 'sites')}),
(_('Advanced options'), {'classes': 'collapse', 'fields': ('enable_comments', 'registration_required', 'template_name')}),
)
list_filter = ('sites',)
search_fields = ('url', 'title')
def __unicode__(self):
return u"%s -- %s" % (self.url, self.title)
def get_absolute_url(self):
return self.url
| pelle/talk.org | django/contrib/flatpages/models.py | Python | gpl-3.0 | 1,630 |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# This module copyright (C) 2014 Therp BV (<http://therp.nl>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import types
from openerp import release
from openerp.osv.orm import TransientModel
from openerp.osv import fields
from openerp.openupgrade.openupgrade import table_exists
from openerp.tools import config, safe_eval
# A collection of functions used in
# openerp/modules/loading.py
def add_module_dependencies(cr, module_list):
"""
Select (new) dependencies from the modules in the list
so that we can inject them into the graph at upgrade
time. Used in the modified OpenUpgrade Server,
not to be called from migration scripts
Also take the OpenUpgrade configuration directives 'forced_deps'
and 'autoinstall' into account. From any additional modules
that these directives can add, the dependencies are added as
well (but these directives are not checked for the occurrence
of any of the dependencies).
"""
if not module_list:
return module_list
forced_deps = safe_eval.safe_eval(
config.get_misc(
'openupgrade', 'forced_deps_' + release.version,
config.get_misc('openupgrade', 'forced_deps', '{}')))
autoinstall = safe_eval.safe_eval(
config.get_misc(
'openupgrade', 'autoinstall_' + release.version,
config.get_misc('openupgrade', 'autoinstall', '{}')))
for module in list(module_list):
module_list += forced_deps.get(module, [])
module_list += autoinstall.get(module, [])
cr.execute("""
SELECT ir_module_module_dependency.name
FROM
ir_module_module,
ir_module_module_dependency
WHERE
module_id = ir_module_module.id
AND ir_module_module.name in %s
""", (tuple(module_list),))
return list(set(module_list + [x[0] for x in cr.fetchall()]))
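# Illustrative sketch (not part of the original module): given the config
# lookups above, the OpenUpgrade directives are read from the server config
# file as Python-literal dicts mapping a module name to extra modules. A
# hypothetical example, with made-up module names:
#
#   [openupgrade]
#   forced_deps_7.0 = {'account': ['account_accountant']}
#   autoinstall = {'sale': ['sale_stock']}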
def log_model(model, local_registry):
"""
OpenUpgrade: Store the characteristics of the BaseModel and its fields
in the local registry, so that we can compare changes with the
main registry
"""
if not model._name:
return
# persistent models only
if isinstance(model, TransientModel):
return
model_registry = local_registry.setdefault(
model._name, {})
if model._inherits:
model_registry['_inherits'] = {'_inherits': unicode(model._inherits)}
for k, v in model._columns.items():
properties = {
'type': v._type,
'isfunction': (
isinstance(v, fields.function) and 'function' or ''),
'relation': (
v._type in ('many2many', 'many2one', 'one2many')
and v._obj or ''
),
'required': v.required and 'required' or '',
'selection_keys': '',
'req_default': '',
'inherits': '',
}
if hasattr(v, 'oldname'):
properties['oldname'] = v.oldname
if v._type == 'selection':
if hasattr(v.selection, "__iter__"):
properties['selection_keys'] = unicode(
sorted([x[0] for x in v.selection]))
else:
properties['selection_keys'] = 'function'
if v.required and k in model._defaults:
if isinstance(model._defaults[k], types.FunctionType):
# todo: in OpenERP 5 (and in 6 as well),
# literals are wrapped in a lambda function
properties['req_default'] = 'function'
else:
properties['req_default'] = unicode(
model._defaults[k])
for key, value in properties.items():
if value:
model_registry.setdefault(k, {})[key] = value
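# Sketch of the structure that log_model() builds in local_registry
# (model and field names below are hypothetical, for illustration only):
#
#   {'res.partner': {
#       'name': {'type': 'char', 'required': 'required'},
#       'state': {'type': 'selection',
#                 'selection_keys': u"['done', 'draft']"},
#   }}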
def get_record_id(cr, module, model, field, mode):
"""
OpenUpgrade: get or create the id from the record table matching
the key parameter values
"""
cr.execute(
"SELECT id FROM openupgrade_record "
"WHERE module = %s AND model = %s AND "
"field = %s AND mode = %s AND type = %s",
(module, model, field, mode, 'field')
)
record = cr.fetchone()
if record:
return record[0]
cr.execute(
"INSERT INTO openupgrade_record "
"(module, model, field, mode, type) "
"VALUES (%s, %s, %s, %s, %s)",
(module, model, field, mode, 'field')
)
cr.execute(
"SELECT id FROM openupgrade_record "
"WHERE module = %s AND model = %s AND "
"field = %s AND mode = %s AND type = %s",
(module, model, field, mode, 'field')
)
return cr.fetchone()[0]
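# Note (possible simplification, not applied here): on PostgreSQL the
# INSERT-then-SELECT round trip above could be collapsed into one statement
# using a RETURNING clause, e.g.
#   INSERT INTO openupgrade_record (module, model, field, mode, type)
#   VALUES (%s, %s, %s, %s, %s) RETURNING id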
def compare_registries(cr, module, registry, local_registry):
"""
OpenUpgrade: Compare the local registry with the global registry,
log any differences and merge the local registry with
the global one.
"""
if not table_exists(cr, 'openupgrade_record'):
return
for model, fields in local_registry.items():
registry.setdefault(model, {})
for field, attributes in fields.items():
old_field = registry[model].setdefault(field, {})
mode = old_field and 'modify' or 'create'
record_id = False
for key, value in attributes.items():
if key not in old_field or old_field[key] != value:
if not record_id:
record_id = get_record_id(
cr, module, model, field, mode)
cr.execute(
"SELECT id FROM openupgrade_attribute "
"WHERE name = %s AND value = %s AND "
"record_id = %s",
(key, value, record_id)
)
if not cr.fetchone():
cr.execute(
"INSERT INTO openupgrade_attribute "
"(name, value, record_id) VALUES (%s, %s, %s)",
(key, value, record_id)
)
old_field[key] = value
| savoirfairelinux/OpenUpgrade | openerp/openupgrade/openupgrade_loading.py | Python | agpl-3.0 | 7,021 |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.6.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1_env_var import V1EnvVar
class TestV1EnvVar(unittest.TestCase):
""" V1EnvVar unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1EnvVar(self):
"""
Test V1EnvVar
"""
model = kubernetes.client.models.v1_env_var.V1EnvVar()
if __name__ == '__main__':
unittest.main()
| skuda/client-python | kubernetes/test/test_v1_env_var.py | Python | apache-2.0 | 803 |
r"""Implementation of SPINN in TensorFlow eager execution.
SPINN: Stack-Augmented Parser-Interpreter Neural Network.
This file contains the model definition and code for training the model.
The model definition is based on PyTorch implementation at:
https://github.com/jekbradbury/examples/tree/spinn/snli
which was released under a BSD 3-Clause License at:
https://github.com/jekbradbury/examples/blob/spinn/LICENSE:
Copyright (c) 2017,
All rights reserved.
See ./LICENSE for more details.
Instructions for use:
* See `README.md` for details on how to prepare the SNLI and GloVe data.
* Suppose you have prepared the data at "/tmp/spinn-data", use the following
command to train the model:
```bash
python spinn.py --data_root /tmp/spinn-data --logdir /tmp/spinn-logs
```
Checkpoints and TensorBoard summaries will be written to "/tmp/spinn-logs".
References:
* Bowman, S.R., Gauthier, J., Rastogi A., Gupta, R., Manning, C.D., & Potts, C.
(2016). A Fast Unified Model for Parsing and Sentence Understanding.
https://arxiv.org/abs/1603.06021
* Bradbury, J. (2017). Recursive Neural Networks with PyTorch.
https://devblogs.nvidia.com/parallelforall/recursive-neural-networks-pytorch/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import itertools
import os
import sys
import time
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.spinn import data
def _bundle(lstm_iter):
"""Concatenate a list of Tensors along 1st axis and split result into two.
Args:
lstm_iter: A `list` of `N` dense `Tensor`s, each of which has the shape
(R, 2 * M).
Returns:
A `list` of two dense `Tensor`s, each of which has the shape (N * R, M).
"""
return tf.split(tf.concat(lstm_iter, 0), 2, axis=1)
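# Shape sketch for _bundle (illustrative numbers only): with N = 3 input
# tensors of shape (R, 2 * M) = (4, 10), tf.concat(..., 0) yields (12, 10)
# and tf.split(..., 2, axis=1) returns two tensors of shape (12, 5) each,
# i.e. (N * R, M).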
def _unbundle(state):
"""Concatenate a list of Tensors along 2nd axis and split result.
This is the inverse of `_bundle`.
Args:
state: A `list` of two dense `Tensor`s, each of which has the shape (R, M).
Returns:
A `list` of `R` dense `Tensors`, each of which has the shape (1, 2 * M).
"""
return tf.split(tf.concat(state, 1), state[0].shape[0], axis=0)
class Reducer(tfe.Network):
"""A module that applies reduce operation on left and right vectors."""
def __init__(self, size, tracker_size=None):
super(Reducer, self).__init__()
self.left = self.track_layer(tf.layers.Dense(5 * size, activation=None))
self.right = self.track_layer(
tf.layers.Dense(5 * size, activation=None, use_bias=False))
if tracker_size is not None:
self.track = self.track_layer(
tf.layers.Dense(5 * size, activation=None, use_bias=False))
else:
self.track = None
def call(self, left_in, right_in, tracking=None):
"""Invoke forward pass of the Reduce module.
This method feeds a linear combination of `left_in`, `right_in` and
`tracking` into a Tree LSTM and returns the output of the Tree LSTM.
Args:
left_in: A list of length L. Each item is a dense `Tensor` with
the shape (1, n_dims). n_dims is the size of the embedding vector.
right_in: A list of the same length as `left_in`. Each item should have
the same shape as the items of `left_in`.
tracking: Optional list of the same length as `left_in`. Each item is a
dense `Tensor` with shape (1, tracker_size * 2). tracker_size is the
size of the Tracker's state vector.
Returns:
Output: A list of length batch_size. Each item has the shape (1, n_dims).
"""
left, right = _bundle(left_in), _bundle(right_in)
lstm_in = self.left(left[0]) + self.right(right[0])
if self.track and tracking:
lstm_in += self.track(_bundle(tracking)[0])
return _unbundle(self._tree_lstm(left[1], right[1], lstm_in))
def _tree_lstm(self, c1, c2, lstm_in):
a, i, f1, f2, o = tf.split(lstm_in, 5, axis=1)
c = tf.tanh(a) * tf.sigmoid(i) + tf.sigmoid(f1) * c1 + tf.sigmoid(f2) * c2
h = tf.sigmoid(o) * tf.tanh(c)
return h, c
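# The code above implements the binary Tree-LSTM update used by the Reducer:
#   c = tanh(a) * sigmoid(i) + sigmoid(f1) * c1 + sigmoid(f2) * c2
#   h = sigmoid(o) * tanh(c)
# where a, i, f1, f2 and o are the five equal chunks split from lstm_in.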
class Tracker(tfe.Network):
"""A module that tracks the history of the sentence with an LSTM."""
def __init__(self, tracker_size, predict):
"""Constructor of Tracker.
Args:
tracker_size: Number of dimensions of the underlying `LSTMCell`.
predict: (`bool`) Whether prediction mode is enabled.
"""
super(Tracker, self).__init__()
self._rnn = self.track_layer(tf.nn.rnn_cell.LSTMCell(tracker_size))
self._state_size = tracker_size
if predict:
self._transition = self.track_layer(tf.layers.Dense(4))
else:
self._transition = None
def reset_state(self):
self.state = None
def call(self, bufs, stacks):
"""Invoke the forward pass of the Tracker module.
This method feeds the concatenation of the top two elements of the stacks
into an LSTM cell and returns the resultant state of the LSTM cell.
Args:
bufs: A `list` of length batch_size. Each item is a `list` of
max_sequence_len (maximum sequence length of the batch). Each item
of the nested list is a dense `Tensor` of shape (1, d_proj), where
d_proj is the size of the word embedding vector or the size of the
vector space that the word embedding vector is projected to.
stacks: A `list` of size batch_size. Each item is a `list` of
variable length corresponding to the current height of the stack.
Each item of the nested list is a dense `Tensor` of shape (1, d_proj).
Returns:
1. A list of length batch_size. Each item is a dense `Tensor` of shape
(1, d_tracker * 2).
2. If under predict mode, result of applying a Dense layer on the
first state vector of the RNN. Else, `None`.
"""
buf = _bundle([buf[-1] for buf in bufs])[0]
stack1 = _bundle([stack[-1] for stack in stacks])[0]
stack2 = _bundle([stack[-2] for stack in stacks])[0]
x = tf.concat([buf, stack1, stack2], 1)
if self.state is None:
batch_size = int(x.shape[0])
zeros = tf.zeros((batch_size, self._state_size), dtype=tf.float32)
self.state = [zeros, zeros]
_, self.state = self._rnn(x, self.state)
unbundled = _unbundle(self.state)
if self._transition:
return unbundled, self._transition(self.state[0])
else:
return unbundled, None
class SPINN(tfe.Network):
"""Stack-augmented Parser-Interpreter Neural Network.
See https://arxiv.org/abs/1603.06021 for more details.
"""
def __init__(self, config):
"""Constructor of SPINN.
Args:
config: A `namedtuple` with the following attributes.
d_proj - (`int`) number of dimensions of the vector space to project the
word embeddings to.
d_tracker - (`int`) number of dimensions of the Tracker's state vector.
d_hidden - (`int`) number of the dimensions of the hidden state, for the
Reducer module.
n_mlp_layers - (`int`) number of multi-layer perceptron layers to use to
convert the output of the `Feature` module to logits.
predict - (`bool`) Whether the Tracker will enable predictions.
"""
super(SPINN, self).__init__()
self.config = config
self.reducer = self.track_layer(Reducer(config.d_hidden, config.d_tracker))
if config.d_tracker is not None:
self.tracker = self.track_layer(Tracker(config.d_tracker, config.predict))
else:
self.tracker = None
def call(self, buffers, transitions, training=False):
"""Invoke the forward pass of the SPINN model.
Args:
buffers: Dense `Tensor` of shape
(max_sequence_len, batch_size, config.d_proj).
transitions: Dense `Tensor` with integer values that represent the parse
trees of the sentences. A value of 2 indicates "reduce"; a value of 3
indicates "shift". Shape: (max_sequence_len * 2 - 3, batch_size).
training: Whether the invocation is under training mode.
Returns:
Output `Tensor` of shape (batch_size, config.d_embed).
"""
max_sequence_len, batch_size, d_proj = (int(x) for x in buffers.shape)
# Split the buffers into left and right word items and put the initial
# items in a stack.
splitted = tf.split(
tf.reshape(tf.transpose(buffers, [1, 0, 2]), [-1, d_proj]),
max_sequence_len * batch_size, axis=0)
buffers = [splitted[k:k + max_sequence_len]
for k in xrange(0, len(splitted), max_sequence_len)]
stacks = [[buf[0], buf[0]] for buf in buffers]
if self.tracker:
# Reset tracker state for new batch.
self.tracker.reset_state()
num_transitions = transitions.shape[0]
# Iterate through transitions and perform the appropriate stack-pop, reduce
# and stack-push operations.
transitions = transitions.numpy()
for i in xrange(num_transitions):
trans = transitions[i]
if self.tracker:
# Invoke tracker to obtain the current tracker states for the sentences.
tracker_states, trans_hypothesis = self.tracker(buffers, stacks)
if trans_hypothesis:
trans = tf.argmax(trans_hypothesis, axis=-1)
else:
tracker_states = itertools.repeat(None)
lefts, rights, trackings = [], [], []
for transition, buf, stack, tracking in zip(
trans, buffers, stacks, tracker_states):
if int(transition) == 3: # Shift.
stack.append(buf.pop())
elif int(transition) == 2: # Reduce.
rights.append(stack.pop())
lefts.append(stack.pop())
trackings.append(tracking)
if rights:
reducer_output = self.reducer(lefts, rights, trackings)
reduced = iter(reducer_output)
for transition, stack in zip(trans, stacks):
if int(transition) == 2: # Reduce.
stack.append(next(reduced))
return _bundle([stack.pop() for stack in stacks])[0]
class SNLIClassifier(tfe.Network):
"""SNLI Classifier Model.
A model aimed at solving the SNLI (Stanford Natural Language Inference)
task, using the SPINN model from above. For details of the task, see:
https://nlp.stanford.edu/projects/snli/
"""
def __init__(self, config, embed):
"""Constructor of SNLICLassifier.
Args:
config: A namedtuple containing required configurations for the model. It
needs to have the following attributes.
projection - (`bool`) whether the word vectors are to be projected onto
another vector space (of `d_proj` dimensions).
d_proj - (`int`) number of dimensions of the vector space to project the
word embeddings to.
embed_dropout - (`float`) dropout rate for the word embedding vectors.
n_mlp_layers - (`int`) number of multi-layer perceptron (MLP) layers to
use to convert the output of the `Feature` module to logits.
mlp_dropout - (`float`) dropout rate of the MLP layers.
d_out - (`int`) number of dimensions of the final output of the MLP
layers.
lr - (`float`) learning rate.
embed: An embedding matrix of shape (vocab_size, d_embed).
"""
super(SNLIClassifier, self).__init__()
self.config = config
self.embed = tf.constant(embed)
self.projection = self.track_layer(tf.layers.Dense(config.d_proj))
self.embed_bn = self.track_layer(tf.layers.BatchNormalization())
self.embed_dropout = self.track_layer(
tf.layers.Dropout(rate=config.embed_dropout))
self.encoder = self.track_layer(SPINN(config))
self.feature_bn = self.track_layer(tf.layers.BatchNormalization())
self.feature_dropout = self.track_layer(
tf.layers.Dropout(rate=config.mlp_dropout))
self.mlp_dense = []
self.mlp_bn = []
self.mlp_dropout = []
for _ in xrange(config.n_mlp_layers):
self.mlp_dense.append(self.track_layer(tf.layers.Dense(config.d_mlp)))
self.mlp_bn.append(
self.track_layer(tf.layers.BatchNormalization()))
self.mlp_dropout.append(
self.track_layer(tf.layers.Dropout(rate=config.mlp_dropout)))
self.mlp_output = self.track_layer(tf.layers.Dense(
config.d_out,
kernel_initializer=tf.random_uniform_initializer(minval=-5e-3,
maxval=5e-3)))
def call(self,
premise,
premise_transition,
hypothesis,
hypothesis_transition,
training=False):
"""Invoke the forward pass the SNLIClassifier model.
Args:
premise: The word indices of the premise sentences, with shape
(max_prem_seq_len, batch_size).
premise_transition: The transitions for the premise sentences, with shape
(max_prem_seq_len * 2 - 3, batch_size).
hypothesis: The word indices of the hypothesis sentences, with shape
(max_hypo_seq_len, batch_size).
hypothesis_transition: The transitions for the hypothesis sentences, with
shape (max_hypo_seq_len * 2 - 3, batch_size).
training: Whether the invocation is under training mode.
Returns:
The logits, as a dense `Tensor` of shape (batch_size, d_out), where d_out
is the size of the output vector.
"""
# Perform embedding lookup on the premise and hypothesis inputs, which have
# the word-index format.
premise_embed = tf.nn.embedding_lookup(self.embed, premise)
hypothesis_embed = tf.nn.embedding_lookup(self.embed, hypothesis)
if self.config.projection:
# Project the embedding vectors to another vector space.
premise_embed = self.projection(premise_embed)
hypothesis_embed = self.projection(hypothesis_embed)
# Perform batch normalization and dropout on the possibly projected word
# vectors.
premise_embed = self.embed_bn(premise_embed, training=training)
hypothesis_embed = self.embed_bn(hypothesis_embed, training=training)
premise_embed = self.embed_dropout(premise_embed, training=training)
hypothesis_embed = self.embed_dropout(hypothesis_embed, training=training)
# Run the batch-normalized and dropout-processed word vectors through the
# SPINN encoder.
premise = self.encoder(premise_embed, premise_transition,
training=training)
hypothesis = self.encoder(hypothesis_embed, hypothesis_transition,
training=training)
# Combine encoder outputs for premises and hypotheses into logits.
# Then apply batch normalization and dropout on the logits.
logits = tf.concat(
[premise, hypothesis, premise - hypothesis, premise * hypothesis], 1)
logits = self.feature_dropout(
self.feature_bn(logits, training=training), training=training)
# Apply the multi-layer perceptron on the logits.
for dense, bn, dropout in zip(
self.mlp_dense, self.mlp_bn, self.mlp_dropout):
logits = tf.nn.elu(dense(logits))
logits = dropout(bn(logits, training=training), training=training)
logits = self.mlp_output(logits)
return logits
class SNLIClassifierTrainer(object):
"""A class that coordinates the training of an SNLIClassifier."""
def __init__(self, snli_classifier, lr):
"""Constructor of SNLIClassifierTrainer.
Args:
snli_classifier: An instance of `SNLIClassifier`.
lr: Learning rate.
"""
self._model = snli_classifier
# Create a custom learning rate Variable for the RMSProp optimizer, because
# the learning rate needs to be manually decayed later (see
# decay_learning_rate()).
self._learning_rate = tfe.Variable(lr, name="learning_rate")
self._optimizer = tf.train.RMSPropOptimizer(self._learning_rate,
epsilon=1e-6)
def loss(self, labels, logits):
"""Calculate the loss given a batch of data.
Args:
labels: The truth labels, with shape (batch_size,).
logits: The logits output from the forward pass of the SNLIClassifier
model, with shape (batch_size, d_out), where d_out is the output
dimension size of the SNLIClassifier.
Returns:
The loss value, as a scalar `Tensor`.
"""
return tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits))
def train_batch(self,
labels,
premise,
premise_transition,
hypothesis,
hypothesis_transition):
"""Train model on batch of data.
Args:
labels: The truth labels, with shape (batch_size,).
premise: The word indices of the premise sentences, with shape
(max_prem_seq_len, batch_size).
premise_transition: The transitions for the premise sentences, with shape
(max_prem_seq_len * 2 - 3, batch_size).
hypothesis: The word indices of the hypothesis sentences, with shape
(max_hypo_seq_len, batch_size).
hypothesis_transition: The transitions for the hypothesis sentences, with
shape (max_hypo_seq_len * 2 - 3, batch_size).
Returns:
1. loss value as a scalar `Tensor`.
2. logits as a dense `Tensor` of shape (batch_size, d_out), where d_out is
the output dimension size of the SNLIClassifier.
"""
with tfe.GradientTape() as tape:
tape.watch(self._model.variables)
logits = self._model(premise,
premise_transition,
hypothesis,
hypothesis_transition,
training=True)
loss = self.loss(labels, logits)
gradients = tape.gradient(loss, self._model.variables)
self._optimizer.apply_gradients(zip(gradients, self._model.variables),
global_step=tf.train.get_global_step())
return loss, logits
def decay_learning_rate(self, decay_by):
"""Decay learning rate of the optimizer by factor decay_by."""
self._learning_rate.assign(self._learning_rate * decay_by)
print("Decayed learning rate of optimizer to: %s" %
self._learning_rate.numpy())
@property
def learning_rate(self):
return self._learning_rate
def _batch_n_correct(logits, label):
"""Calculate number of correct predictions in a batch.
Args:
logits: A logits Tensor of shape `(batch_size, num_categories)` and dtype
`float32`.
label: A labels Tensor of shape `(batch_size,)` and dtype `int64`
Returns:
Number of correct predictions.
"""
return tf.reduce_sum(
tf.cast((tf.equal(
tf.argmax(logits, axis=1), label)), tf.float32)).numpy()
def _evaluate_on_dataset(snli_data, batch_size, model, trainer, use_gpu):
"""Run evaluation on a dataset.
Args:
snli_data: The `data.SnliData` to use in this evaluation.
batch_size: The batch size to use during this evaluation.
model: An instance of `SNLIClassifier` to evaluate.
trainer: An instance of `SNLIClassifierTrainer` to use for this
evaluation.
use_gpu: Whether GPU is being used.
Returns:
1. Average loss across all examples of the dataset.
2. Average accuracy rate across all examples of the dataset.
"""
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
for label, prem, prem_trans, hypo, hypo_trans in _get_dataset_iterator(
snli_data, batch_size):
if use_gpu:
label, prem, hypo = label.gpu(), prem.gpu(), hypo.gpu()
logits = model(prem, prem_trans, hypo, hypo_trans, training=False)
loss_val = trainer.loss(label, logits)
batch_size = tf.shape(label)[0]
mean_loss(loss_val, weights=batch_size.gpu() if use_gpu else batch_size)
accuracy(tf.argmax(logits, axis=1), label)
return mean_loss.result().numpy(), accuracy.result().numpy()
def _get_dataset_iterator(snli_data, batch_size):
"""Get a data iterator for a split of SNLI data.
Args:
snli_data: A `data.SnliData` object.
batch_size: The desired batch size.
Returns:
A dataset iterator.
"""
with tf.device("/device:CPU:0"):
# Some tf.data ops, such as ShuffleDataset, are available only on CPU.
dataset = tf.data.Dataset.from_generator(
snli_data.get_generator(batch_size),
(tf.int64, tf.int64, tf.int64, tf.int64, tf.int64))
dataset = dataset.shuffle(snli_data.num_batches(batch_size))
return tfe.Iterator(dataset)
def train_spinn(embed, train_data, dev_data, test_data, config):
"""Train a SPINN model.
Args:
embed: The embedding matrix as a float32 numpy array with shape
[vocabulary_size, word_vector_len]. word_vector_len is the length of a
word embedding vector.
train_data: An instance of `data.SnliData`, for the train split.
dev_data: Same as above, for the dev split.
test_data: Same as above, for the test split.
config: A configuration object. See the argument to this Python binary for
details.
Returns:
1. Final loss value on the test split.
2. Final fraction of correct classifications on the test split.
"""
use_gpu = tfe.num_gpus() > 0 and not config.force_cpu
device = "gpu:0" if use_gpu else "cpu:0"
print("Using device: %s" % device)
log_header = (
" Time Epoch Iteration Progress (%Epoch) Loss Dev/Loss"
" Accuracy Dev/Accuracy")
log_template = (
"{:>6.0f} {:>5.0f} {:>9.0f} {:>5.0f}/{:<5.0f} {:>7.0f}% {:>8.6f} {} "
"{:12.4f} {}")
dev_log_template = (
"{:>6.0f} {:>5.0f} {:>9.0f} {:>5.0f}/{:<5.0f} {:>7.0f}% {:>8.6f} "
"{:8.6f} {:12.4f} {:12.4f}")
summary_writer = tf.contrib.summary.create_file_writer(
config.logdir, flush_millis=10000)
train_len = train_data.num_batches(config.batch_size)
with tf.device(device), \
tfe.restore_variables_on_create(
tf.train.latest_checkpoint(config.logdir)), \
summary_writer.as_default(), \
tf.contrib.summary.always_record_summaries():
model = SNLIClassifier(config, embed)
global_step = tf.train.get_or_create_global_step()
trainer = SNLIClassifierTrainer(model, config.lr)
start = time.time()
iterations = 0
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
print(log_header)
for epoch in xrange(config.epochs):
batch_idx = 0
for label, prem, prem_trans, hypo, hypo_trans in _get_dataset_iterator(
train_data, config.batch_size):
if use_gpu:
label, prem, hypo = label.gpu(), prem.gpu(), hypo.gpu()
# prem_trans and hypo_trans are used for dynamic control flow and can
# remain on CPU. Same in _evaluate_on_dataset().
iterations += 1
batch_train_loss, batch_train_logits = trainer.train_batch(
label, prem, prem_trans, hypo, hypo_trans)
batch_size = tf.shape(label)[0]
mean_loss(batch_train_loss.numpy(),
weights=batch_size.gpu() if use_gpu else batch_size)
accuracy(tf.argmax(batch_train_logits, axis=1), label)
if iterations % config.save_every == 0:
all_variables = (
model.variables + [trainer.learning_rate] + [global_step])
saver = tfe.Saver(all_variables)
saver.save(os.path.join(config.logdir, "ckpt"),
global_step=global_step)
if iterations % config.dev_every == 0:
dev_loss, dev_frac_correct = _evaluate_on_dataset(
dev_data, config.batch_size, model, trainer, use_gpu)
print(dev_log_template.format(
time.time() - start,
epoch, iterations, 1 + batch_idx, train_len,
100.0 * (1 + batch_idx) / train_len,
mean_loss.result(), dev_loss,
accuracy.result() * 100.0, dev_frac_correct * 100.0))
tf.contrib.summary.scalar("dev/loss", dev_loss)
tf.contrib.summary.scalar("dev/accuracy", dev_frac_correct)
elif iterations % config.log_every == 0:
mean_loss_val = mean_loss.result()
accuracy_val = accuracy.result()
print(log_template.format(
time.time() - start,
epoch, iterations, 1 + batch_idx, train_len,
100.0 * (1 + batch_idx) / train_len,
mean_loss_val, " " * 8, accuracy_val * 100.0, " " * 12))
tf.contrib.summary.scalar("train/loss", mean_loss_val)
tf.contrib.summary.scalar("train/accuracy", accuracy_val)
# Reset metrics.
mean_loss = tfe.metrics.Mean()
accuracy = tfe.metrics.Accuracy()
batch_idx += 1
if (epoch + 1) % config.lr_decay_every == 0:
trainer.decay_learning_rate(config.lr_decay_by)
test_loss, test_frac_correct = _evaluate_on_dataset(
test_data, config.batch_size, model, trainer, use_gpu)
print("Final test loss: %g; accuracy: %g%%" %
(test_loss, test_frac_correct * 100.0))
def main(_):
config = FLAGS
# Load embedding vectors.
vocab = data.load_vocabulary(FLAGS.data_root)
word2index, embed = data.load_word_vectors(FLAGS.data_root, vocab)
print("Loading train, dev and test data...")
train_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_train.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
dev_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_dev.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
test_data = data.SnliData(
os.path.join(FLAGS.data_root, "snli/snli_1.0/snli_1.0_test.txt"),
word2index, sentence_len_limit=FLAGS.sentence_len_limit)
train_spinn(embed, train_data, dev_data, test_data, config)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description=
"TensorFlow eager implementation of the SPINN SNLI classifier.")
parser.add_argument("--data_root", type=str, default="/tmp/spinn-data",
help="Root directory in which the training data and "
"embedding matrix are found. See README.md for how to "
"generate such a directory.")
parser.add_argument("--sentence_len_limit", type=int, default=-1,
help="Maximum allowed sentence length (# of words). "
"The default of -1 means unlimited.")
parser.add_argument("--logdir", type=str, default="/tmp/spinn-logs",
help="Directory in which summaries will be written for "
"TensorBoard.")
parser.add_argument("--epochs", type=int, default=50,
help="Number of epochs to train.")
parser.add_argument("--batch_size", type=int, default=128,
help="Batch size to use during training.")
parser.add_argument("--d_proj", type=int, default=600,
help="Dimensions to project the word embedding vectors "
"to.")
parser.add_argument("--d_hidden", type=int, default=300,
help="Size of the hidden layer of the Tracker.")
parser.add_argument("--d_out", type=int, default=4,
help="Output dimensions of the SNLIClassifier.")
parser.add_argument("--d_mlp", type=int, default=1024,
help="Size of each layer of the multi-layer perceptron "
"of the SNLICLassifier.")
parser.add_argument("--n_mlp_layers", type=int, default=2,
help="Number of layers in the multi-layer perceptron "
"of the SNLICLassifier.")
parser.add_argument("--d_tracker", type=int, default=64,
help="Size of the tracker LSTM.")
parser.add_argument("--log_every", type=int, default=50,
help="Print log and write TensorBoard summary every _ "
"training batches.")
parser.add_argument("--lr", type=float, default=2e-3,
help="Initial learning rate.")
parser.add_argument("--lr_decay_by", type=float, default=0.75,
help="The ratio to multiply the learning rate by every "
"time the learning rate is decayed.")
parser.add_argument("--lr_decay_every", type=float, default=1,
help="Decay the learning rate every _ epoch(s).")
parser.add_argument("--dev_every", type=int, default=1000,
help="Run evaluation on the dev split every _ training "
"batches.")
parser.add_argument("--save_every", type=int, default=1000,
help="Save checkpoint every _ training batches.")
parser.add_argument("--embed_dropout", type=float, default=0.08,
help="Word embedding dropout rate.")
parser.add_argument("--mlp_dropout", type=float, default=0.07,
help="SNLIClassifier multi-layer perceptron dropout "
"rate.")
parser.add_argument("--no-projection", action="store_false",
dest="projection",
help="Whether word embedding vectors are projected to "
"another set of vectors (see d_proj).")
parser.add_argument("--predict_transitions", action="store_true",
dest="predict",
help="Whether the Tracker will perform prediction.")
parser.add_argument("--force_cpu", action="store_true", dest="force_cpu",
help="Force use CPU-only regardless of whether a GPU is "
"available.")
FLAGS, unparsed = parser.parse_known_args()
tfe.run(main=main, argv=[sys.argv[0]] + unparsed)
| jwlawson/tensorflow | third_party/examples/eager/spinn/spinn.py | Python | apache-2.0 | 29,639 |
import os
import pickle
from django.shortcuts import render
from django.http import HttpResponse
from django.db.models import Q
from models import Disease, SubmatrixData
def navigation_autocomplete(request,
template_name='autocomplete.html'):
q = request.GET.get('q', '')
context = {'q': q}
queries = {}
queries['concepts'] = Disease.objects.filter(
Q(concept__icontains=q) |
Q(synonyms__icontains=q)
).distinct()[:20]
context.update(queries)
return render(request, template_name, context)
def createJSON(term, method, nodes):
import numpy
import itertools
import re
from termDict import termIndices # dictionary for index of disease terms
# Find index of searched term
termIndex = termIndices[term]
# Load citation freq matrix based on method, create submatrix based on node num
if method == 'cooccurrence':
matFile_path = os.path.join(os.path.dirname(__file__),'data','diseaseWithPsychCitationGMStat.npy')
elif method == 'similarity':
matFile_path = os.path.join(os.path.dirname(__file__),'data','diseaseWithPsychCitationCosine.npy')
else:
matFile_path = ''
cFreqMat = numpy.load(matFile_path,'r')
cFreqMat_row = cFreqMat[termIndex,:]
nodesIndicesSorted = numpy.argsort(cFreqMat_row, kind='mergesort')
nodesIndicesSorted = nodesIndicesSorted[::-1]
nodesIndicesTop = nodesIndicesSorted[1:nodes+1]
# Load xml file of MeSH diseases
import xml.etree.ElementTree as ET
dxList_tree = ET.parse(os.path.join(os.path.dirname(__file__),'data','diseaseListWithPsychEss.xml'))
dxList_root = dxList_tree.getroot()
'''# Running list of tree numbers
treeNumList = []
disease = dxList_root.find('.//*[@indexnum="%d"]' % termIndex) # add tree numbers of primary disease
for treenumPrimary in disease.findall('TreeNumberList/TreeNumber'):
treeNumList.append(treenumPrimary.text)
# Retrieve top nodes based on link strength, preclude all nodes which are in a parent or child class
nodesIndicesTop = []
count = 0
pos = nodesIndicesSorted.shape[0]-2 # The top value will be the singular frequency of term, start with pairwise frequencies
while (count < nodes):
disease = dxList_root.find('.//*[@indexnum="%d"]' % nodesIndicesSorted[pos])
parentClass = False
for treenumA in disease.findall('TreeNumberList/TreeNumber'):
for treenumB in treeNumList:
if (treenumA.text in treenumB) or (treenumB in treenumA.text):
parentClass = True
if not parentClass:
nodesIndicesTop.append(nodesIndicesSorted[pos])
for treenumA in disease.findall('TreeNumberList/TreeNumber'):
treeNumList.append(treenumA.text)
count += 1
pos -= 1
else:
pos -= 1'''
# Form sub-matrix based on list of top nodes
nodesIndices = numpy.insert(nodesIndicesTop,0,termIndex)
cFreqMat_reduced = cFreqMat[nodesIndices,:][:,nodesIndices]
'''# Convert submatrix into json data variable, pass to template
jsondata = '{"nodes":['
for k in range(nodes+1):
disease = dxList_root.find('.//*[@indexnum="%d"]' % nodesIndices[k])
# re.escape escapes special characters, especially apostrophes, which cause problems as javascript variable
node_jsonEntry = '{"name":"%s",' % re.escape(disease.find('Name').text)
node_jsonEntry = node_jsonEntry + '"size":%d,' % cFreqMat_reduced[k,k]
node_jsonEntry = node_jsonEntry + '"group":['
if k==0:
catnum = 0
catname = 'primary'
node_jsonEntry = node_jsonEntry + '{"groupnum":%d,' % catnum
node_jsonEntry = node_jsonEntry + '"groupname":"%s"},' % catname
else:
for cat in disease.findall('CatList/Category'):
catnum = int(cat.find('CatNum').text)
catname = cat.find('CatName').text
node_jsonEntry = node_jsonEntry + '{"groupnum":%d,' % catnum
node_jsonEntry = node_jsonEntry + '"groupname":"%s"},' % catname
node_jsonEntry = node_jsonEntry[:-1]
node_jsonEntry = node_jsonEntry + ']},'
jsondata = jsondata + node_jsonEntry
jsondata = jsondata[:-1]
jsondata = jsondata + '], "links":['
for m, n in itertools.combinations(range(nodes+1), 2):
link_jsonEntry = '{"source":%d,' % m
link_jsonEntry = link_jsonEntry + '"target":%d,' % n
link_jsonEntry = link_jsonEntry + '"coefficient":%f},' % cFreqMat_reduced[m,n]
jsondata = jsondata + link_jsonEntry
jsondata = jsondata[:-1]
jsondata = jsondata + ']}'
return jsondata'''
nodes_array = []
links_array = []
for k in range(nodes+1):
disease = dxList_root.find('.//*[@indexnum="%d"]' % nodesIndices[k])
node_groupArray = []
if k==0:
catnum = 0
catname = "primary"
node_groupArray.append({"groupnum": catnum, "groupname": catname})
else:
for cat in disease.findall('CatList/Category'):
catnum = int(cat.find('CatNum').text)
catname = cat.find('CatName').text
node_groupArray.append({"groupnum": catnum, "groupname": catname})
nodes_array.append({"name": disease.find('Name').text, "size": numpy.asscalar(cFreqMat_reduced[k,k]), "group": node_groupArray})
for m, n in itertools.combinations(range(nodes+1), 2):
links_array.append({"source": m, "target": n, "coefficient": numpy.asscalar(cFreqMat_reduced[m,n])})
data = {"nodes": nodes_array, "links": links_array}
return data
def getJSON(term, method, nodes):
try:
submatrixObj = SubmatrixData.objects.get(term=term, method=method, nodes=nodes)
dataFile = open(submatrixObj.dataFilePath, 'rb')
data = pickle.load(dataFile)
dataFile.close()
except:
data = createJSON(term, method, nodes)
import uuid
uid_filename = str(uuid.uuid4())
dataFilePath = os.path.join(os.path.dirname(__file__), 'submatrices', uid_filename + '.pkl')
dataFile = open(dataFilePath, 'wb')
pickle.dump(data, dataFile)
dataFile.close()
newdatafile = SubmatrixData(term=term, method=method, nodes=nodes, dataFilePath=dataFilePath)
newdatafile.save()
return data
def getJSONData(request):
import simplejson as json
if request.is_ajax():
try:
term = request.GET['term']
method = request.GET['method']
nodes = int(request.GET['nodes'])
data = getJSON(term, method, nodes)
except Exception as e:
print e
else:
data = 'fail'
mimetype = 'application/json'
return HttpResponse(json.dumps(data), mimetype)
# Request handler for graph button pressed
def graph(request):
term = request.GET['q']
method = request.GET['metric']
nodes = int(request.GET['nodenum'])
# Render page from template
template_values = {
'term': term,
'method': method,
'nodes': nodes,
'keylabel': 'Key:',
'graph': True}
return render(request, 'index.html', template_values)
def index(request):
import random
from termDict import termIndices
term = random.sample(termIndices.viewkeys(), 1)[0]
method = 'cooccurrence'
nodes = 15
template_values = {
'term': term,
'method': method,
'nodes': nodes,
'keylabel': 'Key:',
'graph': True}
return render(request, 'index.html', template_values)
def contact(request):
from django.core.mail import send_mail
name = request.POST.get('f_name', None)
email = request.POST.get('f_email', None)
msg = request.POST.get('f_message', None)
try:
send_mail('DiseaseLink - message from user', name + '\n' + email + '\n\n' + msg, '[email protected]', ['[email protected]'])
return HttpResponse(status=200)
except:
return HttpResponse(status=500)
| transcranial/diseasegraph | main/views.py | Python | mit | 8,086 |
# coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training data generation for R-network."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import os
import subprocess
import sys
import tempfile
from absl import app
from absl import flags
from absl import logging
import concurrent.futures
from episodic_curiosity import constants
from episodic_curiosity import env_factory
from episodic_curiosity import r_network_training
from episodic_curiosity.constants import Const
from episodic_curiosity.environments import dmlab_utils
import gin
import numpy as np
import png
import tensorflow as tf
FLAGS = flags.FLAGS
flags.DEFINE_string(
'workdir', None,
'Root directory for writing training data for the R network.')
flags.DEFINE_string('dmlab_homepath', '', '')
flags.DEFINE_string(
'fully_qualified_level', 'explore_goal_locations_large',
'Level to train on. Name should match fully qualified '
'names in constants.Const')
flags.DEFINE_integer(
'total_env_steps', 2500000,
'Total number of environment steps (across all tasks). When '
'max_action_distance=5, we need (on average) 4 env steps to generate a '
'single training example.')
flags.DEFINE_integer(
'num_examples_per_output_shard', 4000,
'Number of examples for each output file shard. When training the R '
'network, all shards will be read in parallel, which contributes to '
'shuffling the data. Be mindful that changing this flag can affect the '
'quality of the shuffling.')
flags.DEFINE_integer(
'num_workers', 30,
'Number of parallel subprocesses to use for running DMLab. '
'If num_workers==1, we run DMLab directly without spawning a sub-process.')
flags.DEFINE_integer(
'num_tasks', 160,
'Total number of data-generation tasks to run. Each task generates '
'--total_env_steps/--num_tasks environment steps worth of training data.')
flags.DEFINE_integer(
'task_id', -1,
'Task ID. If negative and num_workers>1, this binary will spawn '
'--num_workers subprocess workers. If non-negative and num_workers>1, '
'the binary is in worker mode: it generates and stores training examples '
'for the R network using the DMLab environment. If num_workers==1, there is '
'no master/worker concept, the main binary runs DMLab directly.')
flags.DEFINE_enum('split', 'R_TRAINING', [s.name for s in constants.SplitType],
'Split for which we generate the trajectories')
flags.DEFINE_enum(
'episode_length',
'default',
[
# Default level episode length (60, 90, 120 depending on level)
'default',
'180',
# This corresponds to ~10k actions (initial R-network training that is
# known to work well).
'600',
],
'Length of the episodes.')
flags.DEFINE_enum('action_set', '',
['', 'small', 'nofire', 'withidle', 'defaultwithidle',
'smallwithback'],
'Action set to use when generating R training data.')
flags.DEFINE_integer(
'max_action_distance', 5,
'Parameter that controls the maximum number of env steps '
'difference between two positive example frames generated '
'for training the R network')
flags.DEFINE_enum(
'max_action_distance_mode', 'v1_affect_num_training_examples', [
'v1_affect_num_training_examples',
'v2_fixed_num_training_examples',
'v3_affect_num_training_examples_overlap',
'v4_no_strides',
], 'Controls how max_action_distance affects the number of '
'training examples produced, given the same number '
'of environment steps.')
flags.DEFINE_float(
'avg_num_examples_per_env_step', 1,
'Has an effect only when max_action_distance_mode=v4_no_strides. '
'This is the average number of examples we produce for each input '
'environment step.')
# pylint: disable=g-inconsistent-quotes
flags.DEFINE_multi_string(
'gin_files', [], 'List of paths to gin configuration files')
flags.DEFINE_multi_string(
'gin_bindings', [],
'Gin bindings to override the values set in the config files '
'(e.g. "DQNAgent.epsilon_train=0.1",'
' "create_environment.game_name="Pong"").')
# pylint: enable=g-inconsistent-quotes
def generate_random_episode_buffer(env):
"""Generates random continuous gameplay."""
observation = env.reset()
episode_buffer = []
while True:
action = env.action_space.sample()
observation, _, done, info = env.step(action)
if done:
break
episode_buffer.append((observation, info))
return episode_buffer
def create_training_data_from_episode_buffer(episode_buffer):
"""Samples intervals and forms pairs."""
if FLAGS.max_action_distance_mode == 'v4_no_strides':
return r_network_training.create_training_data_from_episode_buffer_v4(
episode_buffer,
FLAGS.max_action_distance,
FLAGS.avg_num_examples_per_env_step)
else:
return r_network_training.create_training_data_from_episode_buffer_v123(
episode_buffer,
FLAGS.max_action_distance,
FLAGS.max_action_distance_mode)
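# Rough sketch of the expected output of the helpers above (inferred from the
# flag descriptions in this file, not from r_network_training itself): x1 and
# x2 are lists of (observation, info) pairs and labels is a parallel list of
# binary labels, where a positive label marks a pair of frames that are at
# most --max_action_distance environment steps apart.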
def make_seed():
return 123 + FLAGS.task_id
def create_env(level):
"""Creates a DMLab environment for generating R training data."""
main_observation = 'DEBUG.CAMERA.PLAYER_VIEW_NO_RETICLE'
env_settings = dmlab_utils.create_env_settings(
level.dmlab_level_name,
homepath=FLAGS.dmlab_homepath,
width=Const.OBSERVATION_WIDTH,
height=Const.OBSERVATION_HEIGHT,
seed=make_seed(),
main_observation=main_observation)
env_settings.update(level.extra_env_settings)
env_settings.update(
mixerSeed=Const.MIXER_SEEDS[constants.SplitType[FLAGS.split]])
if FLAGS.episode_length != 'default':
env_settings['episodeLengthSeconds'] = float(FLAGS.episode_length)
# Saves those parameters since env_settings can be modified by
# the DMLabWrapper.
seed = env_settings['seed']
mixer_seed = env_settings['mixerSeed']
return dmlab_utils.DMLabWrapper(
'dmlab',
env_settings,
action_set=env_factory.get_action_set(FLAGS.action_set),
action_repeat=Const.ACTION_REPEAT,
main_observation=main_observation
), seed, mixer_seed
def add_image_feature(example, feature_name,
image):
"""Adds an image feature to the tf Example."""
byte_buffer = io.BytesIO()
png.from_array(image, 'RGB').save(byte_buffer)
example.features.feature[feature_name].bytes_list.value.append(
byte_buffer.getvalue())
def add_integer_feature(example, feature_name,
integer):
"""Adds an integer feature to the tf Example."""
example.features.feature[feature_name].int64_list.value.append(integer)
def add_float_feature(example, feature_name,
value):
"""Adds an float feature to the tf Example."""
if isinstance(value, float):
example.features.feature[feature_name].float_list.value.append(value)
else:
example.features.feature[feature_name].float_list.value.extend(
value.flatten())
def add_bytes_feature(example, feature_name,
feature_value):
"""Adds a bytes feature to the tf Example."""
example.features.feature[feature_name].bytes_list.value.append(feature_value)
def get_sharded_filename(task_id, shard_id, tmp=False):
return os.path.join(
FLAGS.workdir, '{}r_training_data_{}_{}.tfrecords'.format(
'tmp_' if tmp else '', task_id, shard_id))
def generate_r_training_data():
"""Runs R training data generation."""
env, seed, mixer_seed = create_env(
Const.find_level(FLAGS.fully_qualified_level))
total_examples = 0
env_steps = 0
examples_in_shard = 0
episode = 0
shard = 0
writer = None
max_task_env_steps = FLAGS.total_env_steps // FLAGS.num_tasks
logging.info('Task %d will run %d env steps',
FLAGS.task_id, max_task_env_steps)
while env_steps < max_task_env_steps:
if (examples_in_shard >= FLAGS.num_examples_per_output_shard or
writer is None):
if writer:
writer.close()
tf.gfile.Rename(
get_sharded_filename(FLAGS.task_id, shard, tmp=True),
get_sharded_filename(FLAGS.task_id, shard, tmp=False),
overwrite=True)
shard += 1
logging.info('Starting shard %d for task %d', shard, FLAGS.task_id)
writer = tf.python_io.TFRecordWriter(
get_sharded_filename(FLAGS.task_id, shard, tmp=True))
examples_in_shard = 0
episode_buffer = generate_random_episode_buffer(env)
start_position = None
if episode_buffer:
start_position = episode_buffer[0][1]['position']
x1, x2, labels = create_training_data_from_episode_buffer(episode_buffer)
for example_index_in_episode, features in enumerate(zip(x1, x2, labels)):
xx1, xx2, label = features
example = tf.train.Example()
add_image_feature(example, 'x1', xx1[0])
add_image_feature(example, 'x2', xx2[0])
add_integer_feature(example, 'label', label)
add_integer_feature(example, 'seed', seed)
add_integer_feature(example, 'mixer_seed', mixer_seed)
example.features.feature['fully_qualified_level'].bytes_list.value.append(
FLAGS.fully_qualified_level.encode('utf-8'))
# This is the episode index for the current generation task.
add_integer_feature(example, 'episode', episode)
# This is the index of the example in the current episode.
add_integer_feature(example, 'example_index_in_episode',
example_index_in_episode)
# This is the index of the example for the current generation task.
add_integer_feature(example, 'example_index',
total_examples + example_index_in_episode)
add_integer_feature(example, 'env_steps_at_episode', env_steps)
# Filtering by this feature leads to a dataset that is equivalent to one
# generated with the given number of total environment steps (modulo
# boundary effects).
add_integer_feature(example, 'global_env_steps_at_episode',
env_steps * FLAGS.num_tasks)
add_bytes_feature(example, 'x1/maze_layout',
xx1[1]['maze_layout'].encode('ascii'))
# For now, x1 and x2 are in the same maze. However, this may not be true
# in the near future.
add_bytes_feature(example, 'x2/maze_layout',
xx2[1]['maze_layout'].encode('ascii'))
add_float_feature(example, 'x1/position', xx1[1]['position'])
add_float_feature(example, 'x2/position', xx2[1]['position'])
add_float_feature(example, 'x1/dist_from_start',
np.linalg.norm(xx1[1]['position'] - start_position))
add_float_feature(example, 'x2/dist_from_start',
np.linalg.norm(xx2[1]['position'] - start_position))
add_float_feature(example, 'x1/rotation', xx1[1]['rotation'])
add_float_feature(example, 'x2/rotation', xx2[1]['rotation'])
add_float_feature(example, 'x1/velocity', xx1[1]['velocity'])
add_float_feature(example, 'x2/velocity', xx2[1]['velocity'])
example.features.feature['episode_length'].bytes_list.value.append(
FLAGS.episode_length.encode('utf-8'))
example.features.feature['action_set'].bytes_list.value.append(
FLAGS.action_set.encode('utf-8'))
try:
noise_type = gin.query_parameter('DMLabWrapper.noise_type')
except ValueError:
noise_type = ''
add_bytes_feature(example, 'noise_type', noise_type.encode('utf-8'))
try:
tv_num_images = int(gin.query_parameter('DMLabWrapper.tv_num_images'))
except ValueError:
tv_num_images = 0
add_integer_feature(example, 'tv_num_images', tv_num_images)
add_integer_feature(example, 'max_action_distance',
FLAGS.max_action_distance)
writer.write(example.SerializeToString())
env_steps += len(episode_buffer)
examples_in_shard += len(x1)
total_examples += len(x1)
episode += 1
writer.close() # type: ignore
tf.gfile.Rename(
get_sharded_filename(FLAGS.task_id, shard, tmp=True),
get_sharded_filename(FLAGS.task_id, shard, tmp=False),
overwrite=True)
def run_env_as_sub_process(task_id):
"""Spawns a subprocess that runs a DMLab environment."""
flags_dict = {f: FLAGS[f].value for f in FLAGS if FLAGS[f].present}
call = [
'python',
'-m',
'episodic_curiosity.generate_r_training_data',
'--task_id=' + str(task_id),
] + ['--{}={}'.format(k, v) for k, v in flags_dict.items()]
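  # Illustrative result (flag values depend on the parent process):
  #   ['python', '-m', 'episodic_curiosity.generate_r_training_data',
  #    '--task_id=3', '--workdir=/tmp/r_data', '--num_tasks=64']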
  logging.info('Starting task %d with: %s', task_id, call)
output = subprocess.check_output(call)
logging.info('Received output from subprocess:\n %s', output)
def main(unused_argv):
gin.parse_config_files_and_bindings(FLAGS.gin_files,
FLAGS.gin_bindings)
if FLAGS.num_workers > 1 and FLAGS.task_id >= 0:
generate_r_training_data()
return
if tf.gfile.Exists(FLAGS.workdir):
# Start on a clean state. It is not necessarily safe to restart the code
# above when some tfrecord files already exist.
tf.gfile.DeleteRecursively(FLAGS.workdir)
tf.gfile.MakeDirs(FLAGS.workdir)
if FLAGS.num_workers == 1:
generate_r_training_data()
else:
logging.info('running %d workers', FLAGS.num_workers)
with concurrent.futures.ThreadPoolExecutor(FLAGS.num_workers) as executor:
successful_tasks = 0
failed_tasks = 0
next_task_id = 0
while successful_tasks < FLAGS.num_tasks:
assert failed_tasks < 40, (
'Too many failures ({} failures, {} successes)'.format(
failed_tasks, successful_tasks))
remaining_tasks = FLAGS.num_tasks - successful_tasks
logging.info('Scheduling %d remaining tasks', remaining_tasks)
results = []
for _ in range(remaining_tasks):
results.append(executor.submit(run_env_as_sub_process, next_task_id))
next_task_id += 1
for result in results:
if result.exception() is None:
successful_tasks += 1
logging.info('One successful task returned (total successful: %d).',
successful_tasks)
else:
failed_tasks += 1
logging.info('Failed task (%d): %s',
failed_tasks, result.exception())
if __name__ == '__main__':
app.run(main)
| google-research/episodic-curiosity | episodic_curiosity/generate_r_training_data.py | Python | apache-2.0 | 15,050 |
# -*- coding: utf-8 -*-
from pCMS.pcomments.forms import pCMSCommentForm
def get_form():
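    """Return the comment form class; hook used by Django's comments framework."""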
return pCMSCommentForm
| sv1jsb/pCMS | pCMS/pcomments/__init__.py | Python | bsd-3-clause | 118 |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""
Use genetic map to break chimeric scaffolds, order and orient scaffolds onto
chromosomes.
"""
import os.path as op
import sys
import logging
import numpy as np
from collections import Counter
from functools import lru_cache
from itertools import combinations, groupby
from jcvi.formats.base import BaseFile, LineFile, must_open, read_block
from jcvi.formats.bed import Bed, fastaFromBed
from jcvi.apps.base import OptionParser, ActionDispatcher, need_update
MSTheader = """population_type {0}
population_name LG
distance_function kosambi
cut_off_p_value 0.000001
no_map_dist 10.0
no_map_size 0
missing_threshold {1}
estimation_before_clustering no
detect_bad_data yes
objective_function ML
number_of_loci {2}
number_of_individual {3}
"""
class BinMap(BaseFile, dict):
def __init__(self, filename):
super(BinMap, self).__init__(filename)
fp = open(filename)
for header, seq in read_block(fp, "group "):
lg = header.split()[-1]
self[lg] = []
for s in seq:
if s.strip() == "" or s[0] == ";":
continue
marker, pos = s.split()
pos = int(float(pos) * 1000)
self[lg].append((marker, pos))
def print_to_bed(self, filename="stdout", switch=False, sep="."):
"""Print the genetic map in the BED format.
Args:
filename (str, optional): Output filename. Defaults to "stdout".
switch (bool, optional): Use linkage group as seqid. Defaults to False.
sep (str, optional): Separator that delimits scaffold name and position. Defaults to ".".
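        Example (illustrative): with switch=False a marker "scaffold_1.5000"
            in group 1 at 1.2 cM is written as "1 1200 1201 scaffold_1.5000";
            with switch=True the line becomes "scaffold_1 4999 5000 1:1.2".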
"""
fw = must_open(filename, "w")
for lg, markers in sorted(self.items()):
for marker, pos in markers:
if not switch:
line = (lg, pos, pos + 1, marker)
else:
seqid_spos = marker.rsplit(sep, 1)
if len(seqid_spos) != 2:
logging.error(
"Error: `{}` must be in the form e.g. `name{}position`".format(
marker, sep
)
)
continue
seqid, spos = seqid_spos
spos = int(spos)
marker = "{0}:{1}".format(lg, pos / 1000.0)
line = (seqid, spos - 1, spos, marker)
print("\t".join(str(x) for x in line), file=fw)
fw.close()
class MSTMapLine(object):
def __init__(self, row, startidx=3):
args = row.split()
self.id = args[0]
self.seqid, pos = self.id.split(".")
self.pos = int(pos)
self.genotype = "".join(args[startidx:])
def __len__(self):
return len(self.genotype)
def __str__(self):
return "{0}: {1}".format(self.id, self.genotype)
@property
def bedline(self):
return "\t".join(str(x) for x in (self.seqid, self.pos - 1, self.pos, self.id))
class MSTMap(LineFile):
def __init__(self, filename):
super(MSTMap, self).__init__(filename)
fp = open(filename)
startidx = 1
for row in fp:
if row.startswith("locus_name"):
if row.split()[1] == "seqid":
startidx = 3
self.header = row.split()
break
for row in fp:
self.append(MSTMapLine(row, startidx=startidx))
self.nmarkers = len(self)
self.nind = len(self[0].genotype)
logging.debug(
"Map contains {0} markers in {1} individuals".format(
self.nmarkers, self.nind
)
)
class MSTMatrix(object):
def __init__(self, matrix, markerheader, population_type, missing_threshold):
self.matrix = matrix
self.markerheader = markerheader
self.population_type = population_type
self.missing_threshold = missing_threshold
self.ngenotypes = len(matrix)
self.nind = len(markerheader) - 1
assert self.nind == len(matrix[0]) - 1
logging.debug(
"Imported {0} markers and {1} individuals.".format(
self.ngenotypes, self.nind
)
)
def write(self, filename="stdout", header=True):
fw = must_open(filename, "w")
if header:
print(
MSTheader.format(
self.population_type,
self.missing_threshold,
self.ngenotypes,
self.nind,
),
file=fw,
)
print("\t".join(self.markerheader), file=fw)
for m in self.matrix:
print("\t".join(m), file=fw)
def main():
actions = (
("breakpoint", "find scaffold breakpoints using genetic map"),
("ld", "calculate pairwise linkage disequilibrium"),
("bed", "convert MSTmap output to bed format"),
("fasta", "extract markers based on map"),
("anchor", "anchor scaffolds based on map"),
("rename", "rename markers according to the new mapping locations"),
("header", "rename lines in the map header"),
# Plot genetic map
("blat", "make ALLMAPS input csv based on sequences"),
("dotplot", "make dotplot between chromosomes and linkage maps"),
)
p = ActionDispatcher(actions)
p.dispatch(globals())
def blat(args):
"""
%prog blat map1.txt ref.fasta
Make ALLMAPS input csv based on sequences. The tab-delimited txt file
include: name, LG, position, sequence.
"""
from jcvi.formats.base import is_number
from jcvi.formats.blast import best as blast_best, bed as blast_bed
from jcvi.apps.align import blat as blat_align
p = OptionParser(blat.__doc__)
_, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
maptxt, ref = args
pf = maptxt.rsplit(".", 1)[0]
register = {}
fastafile = pf + ".fasta"
fp = open(maptxt)
fw = open(fastafile, "w")
for row in fp:
name, lg, pos, seq = row.split()
if not is_number(pos):
continue
register[name] = (pf + "-" + lg, pos)
print(">{0}\n{1}\n".format(name, seq), file=fw)
fw.close()
blatfile = blat_align([ref, fastafile])
bestfile = blast_best([blatfile])
bedfile = blast_bed([bestfile])
b = Bed(bedfile).order
pf = ".".join((op.basename(maptxt).split(".")[0], op.basename(ref).split(".")[0]))
csvfile = pf + ".csv"
fp = open(maptxt)
fw = open(csvfile, "w")
for row in fp:
name, lg, pos, seq = row.split()
if name not in b:
continue
bbi, bb = b[name]
scaffold, scaffold_pos = bb.seqid, bb.start
print(",".join(str(x) for x in (scaffold, scaffold_pos, lg, pos)), file=fw)
fw.close()
def dotplot(args):
"""
%prog dotplot map.csv ref.fasta
Make dotplot between chromosomes and linkage maps.
The input map is csv formatted, for example:
ScaffoldID,ScaffoldPosition,LinkageGroup,GeneticPosition
scaffold_2707,11508,1,0
scaffold_2707,11525,1,1.2
"""
from natsort import natsorted
from jcvi.assembly.allmaps import CSVMapLine
from jcvi.formats.sizes import Sizes
from jcvi.graphics.base import shorten
from jcvi.graphics.dotplot import (
plt,
savefig,
markup,
normalize_axes,
downsample,
plot_breaks_and_labels,
thousands,
)
p = OptionParser(dotplot.__doc__)
p.set_outfile(outfile=None)
opts, args, iopts = p.set_image_options(
args, figsize="8x8", style="dark", dpi=90, cmap="copper"
)
if len(args) != 2:
sys.exit(not p.print_help())
csvfile, fastafile = args
sizes = natsorted(Sizes(fastafile).mapping.items())
seen = set()
raw_data = []
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1]) # the whole canvas
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # the dot plot
fp = must_open(csvfile)
for row in fp:
m = CSVMapLine(row)
seen.add(m.seqid)
raw_data.append(m)
# X-axis is the genome assembly
ctgs, ctg_sizes = zip(*sizes)
xsize = sum(ctg_sizes)
qb = list(np.cumsum(ctg_sizes))
qbreaks = list(zip(ctgs, [0] + qb, qb))
qstarts = dict(zip(ctgs, [0] + qb))
# Y-axis is the map
key = lambda x: x.lg
raw_data.sort(key=key)
ssizes = {}
for lg, d in groupby(raw_data, key=key):
ssizes[lg] = max([x.cm for x in d])
ssizes = natsorted(ssizes.items())
lgs, lg_sizes = zip(*ssizes)
ysize = sum(lg_sizes)
sb = list(np.cumsum(lg_sizes))
sbreaks = list(zip([("LG" + x) for x in lgs], [0] + sb, sb))
sstarts = dict(zip(lgs, [0] + sb))
# Re-code all the scatter dots
data = [
(qstarts[x.seqid] + x.pos, sstarts[x.lg] + x.cm, "g")
for x in raw_data
if (x.seqid in qstarts)
]
npairs = len(data)
data = downsample(data)
x, y, c = zip(*data)
ax.scatter(x, y, c=c, edgecolors="none", s=2, lw=0)
# Flip X-Y label
gy, gx = op.basename(csvfile).split(".")[:2]
gx, gy = shorten(gx, maxchar=30), shorten(gy, maxchar=30)
xlim, ylim = plot_breaks_and_labels(
fig, root, ax, gx, gy, xsize, ysize, qbreaks, sbreaks
)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
title = "Alignment: {} vs {}".format(gx, gy)
title += " ({} markers)".format(thousands(npairs))
root.set_title(markup(title), x=0.5, y=0.96, color="k")
logging.debug(title)
normalize_axes(root)
image_name = opts.outfile or (csvfile.rsplit(".", 1)[0] + "." + iopts.format)
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
fig.clear()
@lru_cache(maxsize=None)
def calc_ldscore(a, b):
assert len(a) == len(b), "{0}\n{1}".format(a, b)
# Assumes markers as A/B
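    # Illustrative sketch of the computation below: with joint call counts
    # n_AA, n_AB, n_BA, n_BB and x_* = n_* / n, allele frequencies are
    # p_A = x_AA + x_AB and q_A = x_AA + x_BA, disequilibrium is
    # D = x_AA - p_A * q_A, and r2 = D^2 / (p_A * p_B * q_A * q_B).
    # e.g. calc_ldscore("AABB", "AABB") == 1.0 (perfectly linked markers).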
c = Counter(zip(a, b))
c_aa = c[("A", "A")]
c_ab = c[("A", "B")]
c_ba = c[("B", "A")]
c_bb = c[("B", "B")]
n = c_aa + c_ab + c_ba + c_bb
if n == 0:
return 0
f = 1.0 / n
x_aa = c_aa * f
x_ab = c_ab * f
x_ba = c_ba * f
x_bb = c_bb * f
p_a = x_aa + x_ab
p_b = x_ba + x_bb
q_a = x_aa + x_ba
q_b = x_ab + x_bb
D = x_aa - p_a * q_a
denominator = p_a * p_b * q_a * q_b
if denominator == 0:
return 0
r2 = D * D / denominator
return r2
def ld(args):
"""
%prog ld map
Calculate pairwise linkage disequilibrium given MSTmap.
"""
from random import sample
from jcvi.algorithms.matrix import symmetrize
p = OptionParser(ld.__doc__)
p.add_option(
"--subsample",
default=1000,
type="int",
help="Subsample markers to speed up",
)
opts, args, iopts = p.set_image_options(args, figsize="8x8")
if len(args) != 1:
sys.exit(not p.print_help())
(mstmap,) = args
subsample = opts.subsample
data = MSTMap(mstmap)
markerbedfile = mstmap + ".subsample.bed"
ldmatrix = mstmap + ".subsample.matrix"
# Take random subsample while keeping marker order
if subsample < data.nmarkers:
data = [data[x] for x in sorted(sample(range(len(data)), subsample))]
else:
logging.debug("Use all markers, --subsample ignored")
nmarkers = len(data)
if need_update(mstmap, (ldmatrix, markerbedfile)):
fw = open(markerbedfile, "w")
print("\n".join(x.bedline for x in data), file=fw)
logging.debug(
"Write marker set of size {0} to file `{1}`.".format(
nmarkers, markerbedfile
)
)
fw.close()
M = np.zeros((nmarkers, nmarkers), dtype=float)
for i, j in combinations(range(nmarkers), 2):
a = data[i]
b = data[j]
M[i, j] = calc_ldscore(a.genotype, b.genotype)
M = symmetrize(M)
logging.debug("Write LD matrix to file `{0}`.".format(ldmatrix))
M.tofile(ldmatrix)
else:
nmarkers = len(Bed(markerbedfile))
M = np.fromfile(ldmatrix, dtype="float").reshape(nmarkers, nmarkers)
logging.debug("LD matrix `{0}` exists ({1}x{1}).".format(ldmatrix, nmarkers))
from jcvi.graphics.base import plt, savefig, Rectangle, draw_cmap
plt.rcParams["axes.linewidth"] = 0
fig = plt.figure(1, (iopts.w, iopts.h))
root = fig.add_axes([0, 0, 1, 1])
ax = fig.add_axes([0.1, 0.1, 0.8, 0.8]) # the heatmap
ax.matshow(M, cmap=iopts.cmap)
# Plot chromosomes breaks
bed = Bed(markerbedfile)
xsize = len(bed)
extent = (0, nmarkers)
chr_labels = []
ignore_size = 20
for (seqid, beg, end) in bed.get_breaks():
ignore = abs(end - beg) < ignore_size
pos = (beg + end) / 2
chr_labels.append((seqid, pos, ignore))
if ignore:
continue
ax.plot((end, end), extent, "w-", lw=1)
ax.plot(extent, (end, end), "w-", lw=1)
# Plot chromosome labels
for label, pos, ignore in chr_labels:
pos = 0.1 + pos * 0.8 / xsize
if not ignore:
root.text(
pos, 0.91, label, ha="center", va="bottom", rotation=45, color="grey"
)
root.text(0.09, pos, label, ha="right", va="center", color="grey")
ax.set_xlim(extent)
ax.set_ylim(extent)
ax.set_axis_off()
draw_cmap(root, "Pairwise LD (r2)", 0, 1, cmap=iopts.cmap)
root.add_patch(Rectangle((0.1, 0.1), 0.8, 0.8, fill=False, ec="k", lw=2))
m = mstmap.split(".")[0]
root.text(
0.5, 0.06, "Linkage Disequilibrium between {0} markers".format(m), ha="center"
)
root.set_xlim(0, 1)
root.set_ylim(0, 1)
root.set_axis_off()
image_name = m + ".subsample" + "." + iopts.format
savefig(image_name, dpi=iopts.dpi, iopts=iopts)
def header(args):
"""
%prog header map conversion_table
Rename lines in the map header. The mapping of old names to new names are
stored in two-column `conversion_table`.
"""
from jcvi.formats.base import DictFile
p = OptionParser(header.__doc__)
p.add_option("--prefix", default="", help="Prepend text to line number")
p.add_option("--ids", help="Write ids to file")
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
mstmap, conversion_table = args
data = MSTMap(mstmap)
hd = data.header
conversion = DictFile(conversion_table)
newhd = [opts.prefix + conversion.get(x, x) for x in hd]
print("\t".join(hd))
print("--->")
print("\t".join(newhd))
ids = opts.ids
if ids:
fw = open(ids, "w")
print("\n".join(newhd), file=fw)
fw.close()
def rename(args):
"""
%prog rename map markers.bed > renamed.map
Rename markers according to the new mapping locations.
"""
p = OptionParser(rename.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
mstmap, bedfile = args
markersbed = Bed(bedfile)
markers = markersbed.order
data = MSTMap(mstmap)
header = data.header
header = [header[0]] + ["seqid", "start"] + header[1:]
renamed = []
for b in data:
m, geno = b.id, b.genotype
om = m
if m not in markers:
m = m.rsplit(".", 1)[0]
if m not in markers:
continue
i, mb = markers[m]
renamed.append([om, mb.seqid, mb.start, "\t".join(list(geno))])
renamed.sort(key=lambda x: (x[1], x[2]))
fw = must_open(opts.outfile, "w")
print("\t".join(header), file=fw)
for d in renamed:
print("\t".join(str(x) for x in d), file=fw)
def anchor(args):
"""
%prog anchor map.bed markers.blast > anchored.bed
Anchor scaffolds based on map.
"""
from jcvi.formats.blast import bed
p = OptionParser(anchor.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
mapbed, blastfile = args
bedfile = bed([blastfile])
markersbed = Bed(bedfile)
markers = markersbed.order
mapbed = Bed(mapbed, sorted=False)
for b in mapbed:
m = b.accn
if m not in markers:
continue
i, mb = markers[m]
new_accn = "{0}:{1}-{2}".format(mb.seqid, mb.start, mb.end)
b.accn = new_accn
print(b)
def bed(args):
"""
    %prog bed map.out
Convert MSTMAP output into bed format.
"""
p = OptionParser(bed.__doc__)
p.add_option(
"--switch",
default=False,
action="store_true",
help="Switch reference and aligned map elements",
)
p.add_option(
"--sep",
default=".",
help="Separator that is used to delimit scaffold and position in the marker name",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(mapout,) = args
pf = mapout.split(".")[0]
mapbed = pf + ".bed"
bm = BinMap(mapout)
bm.print_to_bed(mapbed, switch=opts.switch, sep=opts.sep)
return mapbed
def fasta(args):
"""
%prog fasta map.out scaffolds.fasta
Extract marker sequences based on map.
"""
from jcvi.formats.sizes import Sizes
p = OptionParser(fasta.__doc__)
p.add_option(
"--extend",
default=1000,
type="int",
help="Extend seq flanking the gaps",
)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
mapout, sfasta = args
Flank = opts.extend
pf = mapout.split(".")[0]
mapbed = pf + ".bed"
bm = BinMap(mapout)
bm.print_to_bed(mapbed)
bed = Bed(mapbed, sorted=False)
markersbed = pf + ".markers.bed"
fw = open(markersbed, "w")
sizes = Sizes(sfasta).mapping
for b in bed:
accn = b.accn
scf, pos = accn.split(".")
pos = int(pos)
start = max(0, pos - Flank)
end = min(pos + Flank, sizes[scf])
print("\t".join(str(x) for x in (scf, start, end, accn)), file=fw)
fw.close()
fastaFromBed(markersbed, sfasta, name=True)
def hamming_distance(a, b, ignore=None):
dist = 0
for x, y in zip(a, b):
if ignore and ignore in (x, y):
continue
if x != y:
dist += 1
return dist
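# e.g. hamming_distance("AABB", "ABB-", ignore="-") == 1: only the second
# position differs, and the last pair is skipped because of the missing call "-".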
OK, BREAK, END = range(3)
def check_markers(a, b, maxdiff):
if a.seqid != b.seqid:
return END, None
diff = hamming_distance(a.genotype, b.genotype, ignore="-")
max_allowed = len(a) * maxdiff
if diff <= max_allowed:
return OK, None
return BREAK, (a.seqid, a.pos, b.pos)
def breakpoint(args):
"""
%prog breakpoint mstmap.input > breakpoints.bed
Find scaffold breakpoints using genetic map. Use variation.vcf.mstmap() to
generate the input for this routine.
"""
from more_itertools import pairwise
p = OptionParser(breakpoint.__doc__)
p.add_option(
"--diff",
default=0.1,
type="float",
help="Maximum ratio of differences allowed",
)
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
(mstmap,) = args
diff = opts.diff
data = MSTMap(mstmap)
# Remove singleton markers (avoid double cross-over)
good = []
nsingletons = 0
for i in range(1, len(data) - 1):
a = data[i]
left_label, left_rr = check_markers(data[i - 1], a, diff)
right_label, right_rr = check_markers(a, data[i + 1], diff)
if left_label == BREAK and right_label == BREAK:
nsingletons += 1
continue
good.append(a)
logging.debug("A total of {0} singleton markers removed.".format(nsingletons))
for a, b in pairwise(good):
label, rr = check_markers(a, b, diff)
if label == BREAK:
print("\t".join(str(x) for x in rr))
if __name__ == "__main__":
main()
| tanghaibao/jcvi | jcvi/assembly/geneticmap.py | Python | bsd-2-clause | 20,122 |
import json
import urllib
from django.core.mail import EmailMultiAlternatives
from django.core.serializers.json import DjangoJSONEncoder
from django.db import transaction, connection
from django.http import HttpResponse
from django.shortcuts import render_to_response
from library.f_lib import *
from library.models import *
def takeBI(request):
query = json.loads(str(request.body.decode()))
info = query["info"]
bi_id = query["bi_id"]
context = isauth(request)
if registrRevers(context):
return HttpResponse(json.dumps({"info": 4}))
person = context["person"]
item=BookItem.objects.get(pk=bi_id)
message=Message(item=item,personFrom=person,personTo=item.reader,date=timezone.now(),isRead=0)
message.save()
item.reader=person
item.save()
ss = SysSetting.objects.latest('id')
mail_title = 'Новое сообщение'
text_content = 'У вас забрали книгу: '
html_content = 'У вас забрали книгу: '
text_content += item.book.title +'\n'+'id экземпляра: '+ str(item.id)+'\n'+ss.system_address+\
'Ответьте на запрос\n' + \
'\n\nРаспределенная библиотека.'
html_content += '<strong>' + item.book.title + '</strong><br>' \
'id экземпляра: '+ str(item.id) +'<br>'+ss.system_address + \
'<br>Ответьте на запрос' + \
'<br><br>Распределенная библиотека.'
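    # English gloss of the Russian literals above: "Новое сообщение" = "New message",
    # "У вас забрали книгу" = "Your book was taken", "id экземпляра" = "copy id",
    # "Ответьте на запрос" = "Reply to the request",
    # "Распределенная библиотека" = "Distributed Library".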
sendEmail(mail_title,text_content,html_content,[message.personTo.email])
return HttpResponse(json.dumps({"info": 1}))
def countInMessage(request):
context = isauth(request)
if registrRevers(context):
return HttpResponse(json.dumps({"info": 4}))
person = context["person"]
count=Message.objects.filter(personTo=person,isRead=0).count()
return HttpResponse(json.dumps({"info": 1,'count':count}))
def getMessages(request):
query = json.loads(str(request.body.decode()))
mType = query["mType"]
isRead = query["isRead"]
context = isauth(request)
if registrRevers(context):
return HttpResponse(json.dumps({"info": 4}))
person = context["person"]
if mType=="in":
messages=Message.objects.filter(personTo=person).order_by('date')[:50]
formattedMess=[]
for mess in messages:
formattedMess.append({"id": mess.id, "personFrom": mess.personFrom.natural_key(),
"date": mess.date, "item_id": mess.item.id, "book": mess.item.book.title, "isRead":mess.isRead})
return HttpResponse(json.dumps({"info": 1, "messages": formattedMess}, cls=DjangoJSONEncoder))
def readMessage(request):
query = json.loads(str(request.body.decode()))
info = query["info"]
context = isauth(request)
if registrRevers(context):
return HttpResponse(json.dumps({"info": 4}))
person = context["person"]
Message.objects.filter(personTo=person).update(isRead=1)
return HttpResponse(json.dumps({"info": 1}))
| artbart/DistributedLibrary | library/pac_views/v_messages.py | Python | mit | 3,107 |
from browser import ajax
class FileIO:
def __init__(self, data):
self._data=data
def read(self):
return self._data
def urlopen(url, data=None, timeout=None):
global result
result=None
def on_complete(req):
global result
result=req
_ajax=ajax.ajax()
_ajax.bind('complete', on_complete)
if timeout is not None:
_ajax.set_timeout(timeout)
if data is None:
_ajax.open('GET', url, False)
_ajax.send()
else:
_ajax.open('POST', url, False)
_ajax.send(data)
if isinstance(result.text, str):
return FileIO(result.text), url, result.headers
return FileIO(result.text()), url, result.headers
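# Minimal illustrative usage in the browser (the call is blocking/synchronous):
#   f, url, headers = urlopen("data.txt")
#   text = f.read()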
| andresmrm/brython-experiment | static/brython/Lib/urllib/request.py | Python | agpl-3.0 | 703 |
# -*- coding: utf-8 -*-
from Scraping4blog import Scraping4blog
import sys,os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + '/../util')
from settings import SettingManager
def main():
conf = SettingManager()
instance = Scraping4blog(conf)
instance.run()
if __name__ == "__main__":
main()
| yamanakahirofumi/mokobot | Scraping4blog/run.py | Python | mit | 323 |
import sys
import time
import math
import random
from modules import *
from psychopy import visual, core, data, event, logging, sound, gui, misc, monitors
from psychopy.constants import *
import numpy as np
import os
import Image
import ctypes
MONITOR_NAME = 'tomimac'
def display_grid(population, goal, grid_size):
grid = [['.' for col in xrange(grid_size)] for row in xrange(grid_size)]
for i in population:
grid[i.location[0]][i.location[1]] = 'B'
grid[goal[0]][goal[1]] = 'F'
for row in grid:
line = ''
for column in row:
line += column + ' '
print line
# end def display_grid
def generate_population(size):
population = []
for x in xrange(size):
b = brain.Brain(2,6,4)
i = bug.Bug(b, 100, 20, -20)
population.append(i)
return population
# end def generate_population
def train_population(population, length, size, generation):
count = 0
random.seed()
food = [random.randint(-20,20),random.randint(-20,20)]
for t in xrange(length):
for i in population:
i.goal = food
if i.time_tick():
count += 1
food = [random.randint(-20,20),random.randint(-20,20)]
#time.sleep(0.05)
new_population = []
while len(new_population) < size:
i1 = tournament_selection(population)
i2 = tournament_selection(population)
new_population.append(i1.procreate(i2,0.01))
if size > 20:
new_population.extend(generate_population(1))
return new_population
# end def train_population
def tournament_selection(population):
random.seed()
tournament_size = int(len(population) / 6)
best = population[random.randint(0,len(population)-1)]
for i in xrange(tournament_size):
individual = population[random.randint(0,len(population)-1)]
distance1 = ((individual.goal[0] - individual.location[0])**2) + ((individual.goal[1] - individual.location[1])**2)
distance2 = ((best.goal[0] - best.location[0])**2) + ((best.goal[1] - best.location[1])**2)
distance1 = math.sqrt(distance1) - individual.count
distance2 = math.sqrt(distance2) - best.count
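        # Fitness here is the Euclidean distance to the current food target,
        # reduced by the number of food items already eaten; lower is better.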
if distance1 < distance2:
best = individual
return best
# def end tournament_selection
def mate_population(i1, i2):
random.seed()
b = brain.Brain(2,6,4)
for i in xrange(2):
for h in xrange(6):
if random.random() <= 0.01:
b.ih_weights[i][h] = random.uniform(-1.0,1.0)
else:
b.ih_weights[i][h] = random.choice([i1.brain.ih_weights[i][h],i2.brain.ih_weights[i][h]])
for h in xrange(6):
for o in xrange(4):
if random.random() <= 0.01:
b.ho_weights[h][o] = random.uniform(-1.0,1.0)
else:
b.ho_weights[h][o] = random.choice([i1.brain.ho_weights[h][o],i2.brain.ho_weights[h][o]])
new_bug = bug.Bug(b, 100)
return new_bug
# end def mate_population
def run_population(population, length, pop_list, win):
food_rect = visual.Rect(win, width=0.5, height=0.5, fillColor=[1,-1,-1], fillColorSpace='rgb', lineColor=[1,-1,-1], lineColorSpace='rgb')
count = 0
random.seed()
food = [random.randint(-20,20),random.randint(-20,20)]
food_rect.setPos(food)
food_rect.setAutoDraw(True)
for t in xrange(length):
for i in population:
i.goal = food
if i.time_tick():
count += 1
food = [random.randint(-20,20),random.randint(-20,20)]
food_rect.setPos(food)
food_rect.setAutoDraw(True)
for i in xrange(len(population)):
pop_list[i].setPos(population[i].location)
pop_list[i].setAutoDraw(True)
win.flip()
time.sleep(0.03)
if event.getKeys(["q"]):
exit()
food_rect.setAutoDraw(False)
win.flip()
# end def run_population
population = generate_population(100)
pop_size = 100
for i in xrange(1100):
if i%100 == 0 and pop_size > 20:
pop_size -= 10
population = train_population(population, 50, pop_size, i)
print "Generation: " + str(i)
# Setup the Psychopy variables (screen, stimuli, sounds, etc.)
win = visual.Window(fullscr=True, screen=0, allowGUI=False, allowStencil=False, monitor=MONITOR_NAME, color=[0,0,0], colorSpace='rgb', units='deg')
mon = monitors.Monitor(MONITOR_NAME)
trialClock = core.Clock()
eventClock = core.Clock()
keyResp = event.BuilderKeyResponse() # create an object of type KeyResponse
mouse = event.Mouse(win=win)
file = open('saved_weights.csv','w')
pop_list = []
for i in population:
line = ''
for inputs in xrange(2):
for hidden in xrange(6):
line += str(i.brain.ih_weights[inputs][hidden]) + ','
for hidden in xrange(6):
for output in xrange(4):
line += str(i.brain.ho_weights[hidden][output]) + ','
line += '\n'
file.write(line)
file.flush()
pop_list.append(visual.Rect(win, width=0.5, height=0.5, fillColor=[-1,-1,-1], fillColorSpace='rgb', lineColor=[-1,-1,-1], lineColorSpace='rgb'))
file.close()
for i in xrange(20):
    run_population(population, 100, pop_list, win)
 | Chippers255/bugs | old_source/test_psychopy.py | Python | mit | 5,397 |
import __builtin__
import json
from requests import post, get
import maps
from tendrl.monitoring_integration.grafana import utils
from tendrl.monitoring_integration.grafana import exceptions
HEADERS = {"Accept": "application/json",
"Content-Type": "application/json"
}
''' Create new organisation'''
def create_org(org_name):
config = maps.NamedDict(NS.config.data)
upload_str = {"name": org_name}
if utils.port_open(config.grafana_port, config.grafana_host):
response = post("http://{}:{}/api"
"/orgs".format(config.grafana_host,
config.grafana_port),
headers=HEADERS,
auth=config.credentials,
data=json.dumps(upload_str))
try:
return json.loads(response.content)["orgId"]
except KeyError:
return None
else:
raise exceptions.ConnectionFailedException
''' Get particular organisation by name '''
def get_org_id(org_name):
config = maps.NamedDict(NS.config.data)
if utils.port_open(config.grafana_port, config.grafana_host):
resp = get("http://{}:{}/api/orgs/name/"
"{}".format(config.grafana_host,
config.grafana_port,
org_name),
auth=config.credentials)
try:
return resp.content
except (KeyError, AttributeError):
return None
else:
raise exceptions.ConnectionFailedException
''' Switch context to particular org '''
def switch_context(org_id):
config = maps.NamedDict(NS.config.data)
upload_str = ''
if utils.port_open(config.grafana_port, config.grafana_host):
response = post("http://{}:{}/api/user/using"
"/{}".format(config.grafana_host,
config.grafana_port,
org_id),
headers=HEADERS,
auth=config.credentials,
data=upload_str)
try:
if "changed" in json.loads(response.content)["message"]:
return True
else:
return False
except KeyError:
return False
else:
raise exceptions.ConnectionFailedException
def create_api_token(key_name, role):
config = maps.NamedDict(NS.config.data)
request_body = {"name": key_name, "role": role}
if utils.port_open(config.grafana_port, config.grafana_host):
response = post("http://{}:{}/api/auth/"
"keys".format(config.grafana_host,
config.grafana_port),
headers=HEADERS,
auth=config.credentials,
data=json.dumps(request_body))
try:
return json.loads(response.content)["key"]
except KeyError:
return None
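# Illustrative flow (names are hypothetical; assumes NS.config is already populated):
#   org_id = create_org("tendrl_monitoring")
#   switch_context(org_id)
#   api_key = create_api_token("tendrl_key", "Admin")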
| rishubhjain/monitoring-integration | tendrl/monitoring_integration/grafana/grafana_org_utils.py | Python | lgpl-2.1 | 3,039 |
# setup.py
from distutils.core import setup
import py2exe
setup(name='WrestlingNerd',
version='3.2',
author='Peter Parente',
author_email='[email protected]',
url='http://wnerd.sourceforge.net',
description='Wrestling Nerd: Wrestling tournament management software',
options = {'py2exe': {'compressed': 1, 'optimize': 2}},
windows = [{'script': 'WrestlingNerd.py', 'icon_resources': [(1, 'WrestlingNerd_wdr/nerd.ico')]}],
data_files=[('WrestlingNerd_wdr', ['WrestlingNerd_wdr/bout.png', 'WrestlingNerd_wdr/LogoBitmaps_0.png', 'WrestlingNerd_wdr/nerd16.ico']),
('', ['LICENSE.txt']),
('layouts', ['layouts/CTOpen.yml', 'layouts/CTStates.yml', 'layouts/BCInvite.yml'])]
)
| parente/wnerd | setup.py | Python | mit | 762 |
# -*- coding: utf-8 -*-
##############################################################################
#
# This file is part of mozaik_mandate, an Odoo module.
#
# Copyright (c) 2015 ACSONE SA/NV (<http://acsone.eu>)
#
# mozaik_mandate is free software:
# you can redistribute it and/or
# modify it under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# mozaik_mandate is distributed in the hope that it will
# be useful but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the
# GNU Affero General Public License
# along with mozaik_mandate.
# If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import tempfile
import base64
from openerp.tools.translate import _
from anybox.testing.openerp import SharedSetupTransactionCase
from openerp.addons.mozaik_mandate.wizard \
import electoral_results_wizard as wizard_class
_logger = logging.getLogger(__name__)
class test_electoral_results_wizard(SharedSetupTransactionCase):
_data_files = (
'../../mozaik_base/tests/data/res_partner_data.xml',
'../../mozaik_structure/tests/data/structure_data.xml',
'data/mandate_data.xml',
)
_module_ns = 'mozaik_mandate'
def setUp(self):
super(test_electoral_results_wizard, self).setUp()
self.district_01 = self.browse_ref(
'%s.electoral_district_01' % self._module_ns)
self.legislature_01_id = self.ref(
'%s.legislature_01' % self._module_ns)
self.sta_paul_communal = self.browse_ref(
'%s.sta_paul_communal' % self._module_ns)
self.sta_pauline_communal = self.browse_ref(
'%s.sta_pauline_communal' % self._module_ns)
self.sta_marc_communal = self.browse_ref(
'%s.sta_marc_communal' % self._module_ns)
self.sta_thierry_communal = self.browse_ref(
'%s.sta_thierry_communal' % self._module_ns)
self.sta_jacques_communal = self.browse_ref(
'%s.sta_jacques_communal' % self._module_ns)
self.committee_id = self.ref(
'%s.sc_tete_huy_communale' % self._module_ns)
committee_pool = self.registry('sta.selection.committee')
candidature_pool = self.registry['sta.candidature']
accepted_ids = [self.sta_paul_communal.id,
self.sta_pauline_communal.id,
self.sta_thierry_communal.id,
self.sta_jacques_communal.id]
rejected_ids = [self.sta_marc_communal.id]
candidature_pool.signal_workflow(self.cr,
self.uid,
accepted_ids,
'button_suggest')
candidature_pool.signal_workflow(self.cr,
self.uid,
rejected_ids,
'button_reject')
committee_pool.write(self.cr, self.uid, [self.committee_id],
{'decision_date': '2014-04-01'})
committee_pool.button_accept_candidatures(
self.cr, self.uid, [self.committee_id])
def test_electoral_results_wizard_wrong_file(self):
'''
Import electoral results
'''
candidature_pool = self.registry['sta.candidature']
candidature_pool.signal_workflow(self.cr,
self.uid,
[self.sta_paul_communal.id],
'button_elected')
temp_file = tempfile.SpooledTemporaryFile(mode='w+r')
temp_file.write(','.join(wizard_class.file_import_structure) + '\n')
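        # Each data row below follows file_import_structure, i.e.
        # (district, E/S flag, candidate name, votes, position, position non elected).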
# wrong row size
data = ['a', 'b']
temp_file.write(','.join(data) + '\n')
# votes non numerical
data = ['test', '', 'Toto', 'a', '', '']
temp_file.write(','.join(data) + '\n')
# position non numerical
data = ['test', '', 'Toto', '3', 'a', '']
temp_file.write(','.join(data) + '\n')
# position non elected non numerical
data = ['test', '', 'Toto', '3', '2', 'a']
temp_file.write(','.join(data) + '\n')
# unknown district
data = ['test', '', 'Toto', '3', '2', '1']
temp_file.write(','.join(data) + '\n')
# unknown candidate
data = [self.district_01.name, '', 'Toto', '3', '2', '']
temp_file.write(','.join(data) + '\n')
# bad candidature state
data = [self.district_01.name, '', self.sta_marc_communal.partner_name,
'3', '2', '']
temp_file.write(','.join(data) + '\n')
# elected candidate with position non elected set
data = [self.district_01.name, '', self.sta_paul_communal.partner_name,
'3', '', '1']
temp_file.write(','.join(data) + '\n')
# inconsistent value for column E/S
data = [self.district_01.name, 'B',
self.sta_pauline_communal.partner_name, '3', '2', '1']
temp_file.write(','.join(data) + '\n')
# inconsistent value for column E/S with candidature settings
data = [self.district_01.name, '',
self.sta_thierry_communal.partner_name, '3', '2', '1']
temp_file.write(','.join(data) + '\n')
# Effective line with substitute candidature
data = [self.district_01.name, 'E',
self.sta_thierry_communal.partner_name, '3', '2', '1']
temp_file.write(','.join(data) + '\n')
# Substitute line with effective candidature
data = [self.district_01.name, 'S',
self.sta_pauline_communal.partner_name, '3', '2', '1']
temp_file.write(','.join(data) + '\n')
# Position non elected should not be set with e_S value
data = [self.district_01.name, 'E',
self.sta_pauline_communal.partner_name, '3', '2', '1']
temp_file.write(','.join(data) + '\n')
# Position and position non elected can not be set both
data = [self.district_01.name, '',
self.sta_paul_communal.partner_name, '3', '2', '1']
temp_file.write(','.join(data) + '\n')
temp_file.seek(0)
data_file = temp_file.read()
temp_file.close()
context = {
'active_ids': [self.legislature_01_id],
'active_model': 'legislature',
}
wizard_pool = self.registry('electoral.results.wizard')
wiz_id = wizard_pool.create(
self.cr, self.uid,
{'source_file': base64.encodestring(data_file)},
context=context)
wizard_pool.validate_file(self.cr, self.uid, [wiz_id])
wizard = wizard_pool.browse(self.cr, self.uid, wiz_id)
self.assertEqual(len(wizard.error_lines), 14)
for error in wizard.error_lines:
if error.line_number == 2:
expected_msg = _('Wrong number of columns(%s), '
'%s expected!') % \
(2, len(wizard_class.file_import_structure))
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 3:
expected_msg = _('Votes value should be integer: %s') % 'a'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 4:
expected_msg = _('Position value should be integer: %s') % 'a'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 5:
expected_msg = _('Position non elected value should '
'be integer: %s') % 'a'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 6:
expected_msg = _('Unknown district: %s') % 'test'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 7:
expected_msg = _('Unknown candidate: %s') % 'Toto'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 8:
expected_msg = _(
'Inconsistent state for candidature: %s') % 'rejected'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 9:
expected_msg = _(
'Candidate is elected but position '
'non elected (%s) is set') % '1'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 10:
expected_msg = _(
'Inconsistent value for column E/S: %s') % 'B'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 11:
expected_msg = _('Candidature: inconsistent value for '
'column E/S: should be %s') % 'S'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 12:
expected_msg = _('Candidature is not flagged as effective')
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 13:
expected_msg = _('Candidature is not flagged as substitute')
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 14:
expected_msg = _('Position non elected is incompatible '
'with e_s value: %s') % 'E'
self.assertEquals(error.error_msg, expected_msg)
elif error.line_number == 15:
expected_msg = _('Position(%s) and position non elected(%s) '
'can not be set both') % ('2', '1')
self.assertEquals(error.error_msg, expected_msg)
else:
pass
def test_electoral_results_wizard_elected(self):
'''
Import electoral results
'''
candidature_pool = self.registry['sta.candidature']
used_ids = [self.sta_paul_communal.id,
self.sta_pauline_communal.id,
self.sta_jacques_communal.id
]
temp_file = tempfile.SpooledTemporaryFile(mode='w+r')
temp_file.write(','.join(wizard_class.file_import_structure) + '\n')
data = [self.district_01.name, '',
self.sta_paul_communal.partner_name, '1258', '1', '']
temp_file.write(','.join(data) + '\n')
data = [self.district_01.name, 'E',
self.sta_pauline_communal.partner_name, '1258', '1', '']
temp_file.write(','.join(data) + '\n')
data = [self.district_01.name, 'S',
self.sta_jacques_communal.partner_name, '1258', '1', '']
temp_file.write(','.join(data) + '\n')
data = [self.district_01.name, 'E',
self.sta_jacques_communal.partner_name, '1258', '1', '']
temp_file.write(','.join(data) + '\n')
temp_file.seek(0)
data_file = temp_file.read()
temp_file.close()
context = {
'active_ids': [self.legislature_01_id],
'active_model': 'legislature',
}
wizard_pool = self.registry('electoral.results.wizard')
wiz_id = wizard_pool.create(
self.cr, self.uid,
{'source_file': base64.encodestring(data_file)},
context=context)
wizard_pool.validate_file(self.cr, self.uid, [wiz_id])
wizard = wizard_pool.browse(self.cr, self.uid, wiz_id)
self.assertEqual(len(wizard.error_lines), 0)
wizard_pool.import_file(self.cr, self.uid, [wiz_id])
for candidature in candidature_pool.browse(self.cr,
self.uid,
used_ids):
self.assertEqual(candidature.state, 'elected')
self.assertEqual(candidature.election_effective_position, 1)
self.assertEqual(candidature.effective_votes, 1258)
def test_electoral_results_wizard_non_elected(self):
'''
Import electoral results
'''
candidature_pool = self.registry['sta.candidature']
used_ids = [self.sta_paul_communal.id,
self.sta_pauline_communal.id,
self.sta_jacques_communal.id
]
temp_file = tempfile.SpooledTemporaryFile(mode='w+r')
temp_file.write(','.join(wizard_class.file_import_structure) + '\n')
data = [self.district_01.name, '',
self.sta_paul_communal.partner_name, '1258', '', '1']
temp_file.write(','.join(data) + '\n')
data = [self.district_01.name, 'E',
self.sta_pauline_communal.partner_name, '1258', '0', '']
temp_file.write(','.join(data) + '\n')
data = [self.district_01.name, 'S',
self.sta_jacques_communal.partner_name, '1258', '1', '']
temp_file.write(','.join(data) + '\n')
data = [self.district_01.name, 'E',
self.sta_jacques_communal.partner_name, '1258', '0', '']
temp_file.write(','.join(data) + '\n')
temp_file.seek(0)
data_file = temp_file.read()
temp_file.close()
context = {
'active_ids': [self.legislature_01_id],
'active_model': 'legislature',
}
wizard_pool = self.registry('electoral.results.wizard')
wiz_id = wizard_pool.create(
self.cr, self.uid,
{'source_file': base64.encodestring(data_file)},
context=context)
wizard_pool.validate_file(self.cr, self.uid, [wiz_id])
wizard = wizard_pool.browse(self.cr, self.uid, wiz_id)
self.assertEqual(len(wizard.error_lines), 0)
wizard_pool.import_file(self.cr, self.uid, [wiz_id])
for candidature in candidature_pool.browse(self.cr,
self.uid,
used_ids):
self.assertEqual(candidature.state, 'non-elected')
if candidature.id == self.sta_paul_communal.id:
self.assertEqual(candidature.election_substitute_position, 1)
self.assertEqual(candidature.substitute_votes, 1258)
else:
self.assertEqual(candidature.election_effective_position, 0)
self.assertEqual(candidature.effective_votes, 1258)
| acsone/mozaik | mozaik_mandate/tests/test_electoral_results_wizard.py | Python | agpl-3.0 | 14,966 |
from mrjob.job import MRJob
from mrjob.protocol import JSONValueProtocol
import re
WORD_RE = re.compile(r"[\w']+")
class ReviewWordCount(MRJob):
INPUT_PROTOCOL = JSONValueProtocol
def extract_words(self, _, record):
"""Extract words using a regular expression. Normalize the text to
ignore capitalization."""
if record['type'] == 'review':
for word in WORD_RE.findall(record['text']):
yield [word.lower(), 1]
def count_words(self, word, counts):
"""Summarize all the counts by taking the sum."""
yield [word, sum(counts)]
def steps(self):
"""Counts the number of words in all reviews
extract_words: <line, record> => <word, count>
count_words: <word, counts> => <word, total>
"""
return [
self.mr(self.extract_words, self.count_words),
]
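# Illustrative local run (the input file name is hypothetical):
#   python review_word_count.py yelp_academic_dataset.json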
if __name__ == '__main__':
ReviewWordCount.run()
| shngli/Data-Mining-Python | MapReduce Yelp analysis/review_word_count.py | Python | gpl-3.0 | 945 |
import os
import sys
import time
import traceback
from multiprocessing.dummy import Pool
# self-defined module
import parser
import image_downloader
import uploader
import source_map
# third party module
import yaml
import updater
import click
from google.api_core.exceptions import GoogleAPICallError
class UfileTransfer:
def __init__(self, root_path, config_file):
self._root_path = root_path
self.sql_parser = [] # array of db_parser
self._config = config_file
self.project_parser = parser.FileParser(source=self._config['parse_folder'])
self.source_map = source_map.SourceMap(os.path.join(root_path, self._config["source_map_name"]))
db_configs = [
self._config["mysql"][0]["member"],
self._config["mysql"][2]["global"],
self._config["mysql"][3]["friend"],
self._config["mysql"][5]["billing"],
self._config["mysql"][7]["stat"],
self._config["mysql"][8]["admin"],
self._config["mysql"][9]["agent"],
self._config["mysql"][10]["log"],
self._config["mysql"][11]["event"],
self._config["mysql"][12]["tree"],
self._config["mysql"][13]["dynamic"],
self._config["mysql"][14]["billboard"],
self._config["mysql"][15]["fan"],
]
pool = Pool(4)
self.sql_parser = pool.map(parser.DBParser, db_configs)
pool.close()
pool.join()
self._gcs_host = self._config["gcs_host"]
def process_db(self, commit=True):
# maybe we can use multi process
for sql_parser in self.sql_parser:
try:
u = updater.SQLUpdater(sql_parser.config,
source_map=self.source_map,
gcs_host=self._gcs_host,
conn=sql_parser.conn,
commit=commit)
for table in sql_parser.walk():
for result in sql_parser.parse(table):
column_name, match, primary_key_column_name, primary_key_value = result
if match is not None:
u.generate_update_sql(table, column_name, match[0], (primary_key_column_name, primary_key_value))
except Exception as e:
print(f"exception happen: {e}")
print(traceback.print_exc())
finally:
# do update iterate each db
u.update()
self.source_map.commit()
sql_parser.close()
print(f"-- end of {sql_parser.config['db_name']} --")
def process_file(self, commit=True):
"""start project parser"""
for file in self.project_parser.walk():
try:
u = updater.FileUpdater(file,
source_map=self.source_map,
gcs_host=self._gcs_host,
commit=commit)
for result in self.project_parser.parse(file):
line_num, content, match = result
pos = f"file: {file}, line: {line_num}"
u.update_line(content, match, pos)
self.source_map.commit()
except Exception as e:
print(f"exception happen: {e} file: {file}")
finally:
u.close()
def process(self):
self.process_file()
self.process_db()
def revert_file(self):
for file in self.project_parser.walk():
u = updater.FileUpdater(file)
u.revert()
def revert_db(self):
for sql_parser in self.sql_parser:
u = updater.SQLUpdater(sql_parser.config,
source_map=self.source_map,
gcs_host=self._gcs_host)
u.revert()
def revert(self):
self.revert_db()
self.revert_file()
def close(self):
self.source_map.close()
class UfileDownloader:
    """UfileDownloader downloads ufile images to local storage.
    Depends on ImageDownloader and SourceMap.
"""
def __init__(self, root_path, config):
self._root_path = root_path
self._source_map = source_map.SourceMap(config["source_map_name"])
self._download_folder = config["download_folder"]
self._image_downloader = image_downloader.ImageDownloader()
def start(self):
"""start download"""
for resource in self._source_map.get_undownloaded_resources():
link = resource.name
target_path = os.path.join(self._root_path, self._download_folder)
if not os.path.exists(target_path):
os.makedirs(target_path)
store_path = os.path.join(target_path, resource.subpath)
try:
self._image_downloader.download_to_file(link, store_path)
self._source_map.mark_downloaded(resource)
except FileNotFoundError as e:
raise e
except Exception as e:
print(f"error happen {e}")
time.sleep(0.2)
class UfileUploader:
    """UfileUploader uploads ufile assets to GCS.
    Reads files fetched by the downloader and uploads them.
"""
def __init__(self, root_path, config):
self._root_path = root_path
self._source_map = source_map.SourceMap(os.path.join(self._root_path, config["source_map_name"]))
self._download_folder = config["download_folder"]
credential_path = os.path.join(self._root_path, "google-application-credentials.json")
self._uploader = uploader.Uploader(self._root_path,
credential_path,
gcs_path=config["gcs_path"],
bucket_name=config["bucket_name"])
def start(self):
"""start upload"""
resource_map = {}
for resource in self._source_map.get_unuploaded_resources():
try:
file_name = os.path.join(self._root_path, self._download_folder, resource.subpath)
if not file_name in resource_map:
self._uploader.upload(file_name)
self._source_map.mark_uploaded(resource)
resource_map[file_name] = True
except GoogleAPICallError:
pass
if __name__ == "__main__":
# how to use
# for staging env
# change your config to staging.yaml
# First, we only need to parse the db and project files (without commit)
    # Second, download those files
    # Third, upload those files
    # Fourth, run the transfer service again, this time with commit enabled;
    # this will apply the changes to the project files and the db
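    # Example invocation (illustrative; adjust flags to your setup):
    #   python main.py transfer --commit false --target all   # dry run
    #   python main.py download
    #   python main.py upload
    #   python main.py transfer --commit true --target all    # apply changes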
root = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(root, "sql_dumper/prod.yaml")
with open(config_file) as f:
        config = yaml.safe_load(f)
@click.group(chain=True)
def cli():
"""This script provides ufile transfer utils.
        - transfer
        - reverter
        - download
        - upload
"""
pass
@cli.command("transfer")
    @click.option("-c", "--commit", default=True, help="apply changes to db and files: true or false")
@click.option("-t", "--target", default="all", help="run db or file transfer: db, file, or all")
def start_ufile_transfer(commit, target):
"""start ufile transfer to extract ufile links from files and
db and store in the source db
"""
ut = UfileTransfer(root, config)
        # click may already deliver a bool (default=True); only parse string input
        commit = commit if isinstance(commit, bool) else str(commit).lower() == 'true'
if target == "db":
ut.process_db(commit)
elif target == "file":
ut.process_file(commit)
else:
ut.process()
ut.close()
@cli.command("reverter")
@click.option("-t", "--target", default="all", help="run db or file revert")
def start_ufile_reverter(target):
ut = UfileTransfer(root, config)
if target == "db":
ut.revert_db()
elif target == "file":
ut.revert_file()
else:
ut.revert()
ut.close()
@cli.command("download")
def start_ufile_downloader():
"""start downloading ufile assets from the source db
"""
ud = UfileDownloader(root, config)
ud.start()
@cli.command("upload")
def start_ufile_uploader():
"""start uploading ufile assets to the gcs
"""
up = UfileUploader(root, config)
up.start()
cli()
| sillygod/my-travel-in-learning-python | ufile_transfer/main.py | Python | gpl-2.0 | 8,700 |
from __future__ import unicode_literals
from django.conf import settings
# thx to @wullerot https://gist.github.com/wullerot/9fe3151101e57a9ee6fadb3cababb619
class LanguageTabsMixin(object):
change_form_template = 'admin/djangocms_misc/modeltranslation_lang_tabs_change_form.html'
def change_view(self, request, object_id, form_url='', extra_context=None):
context = extra_context or {}
context['tab_languages'] = settings.LANGUAGES
return super(LanguageTabsMixin, self).change_view(
request,
object_id=object_id,
form_url=form_url,
extra_context=context
)
def add_view(self, request, form_url='', extra_context=None):
context = extra_context or {}
context['tab_languages'] = settings.LANGUAGES
return super(LanguageTabsMixin, self).add_view(
request,
form_url=form_url,
extra_context=context
)
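# Illustrative usage (admin class names are hypothetical): mix into a ModelAdmin,
# e.g. class ArticleAdmin(LanguageTabsMixin, TranslationAdmin): ..., so the
# change/add forms render one tab per language in settings.LANGUAGES.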
| bnzk/djangocms-misc | djangocms_misc/basic/admin.py | Python | mit | 960 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The plugin serving the interactive inference tab."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import io
import json
import logging
import math
import numpy as np
import os
import werkzeug
from werkzeug import wrappers
from six.moves import xrange # pylint: disable=redefined-builtin
from google.protobuf import json_format
from grpc.framework.interfaces.face.face import AbortionError
import tensorflow as tf
from tensorboard.backend import http_util
from tensorboard.plugins import base_plugin
from utils import common_utils
from utils import inference_utils
from utils import platform_utils
logger = logging.getLogger('tensorboard')
# Max number of examples to scan along the `examples_path` in order to return
# statistics and sampling for features.
NUM_EXAMPLES_TO_SCAN = 50
# Max number of mutants to show per feature (i.e. num of points along x-axis).
NUM_MUTANTS = 10
class WhatIfToolPlugin(base_plugin.TBPlugin):
"""Plugin for understanding/debugging model inference.
"""
# This string field is used by TensorBoard to generate the paths for routes
# provided by this plugin. It must thus be URL-friendly. This field is also
# used to uniquely identify this plugin throughout TensorBoard. See BasePlugin
# for details.
plugin_name = 'whatif'
examples = []
updated_example_indices = set()
sprite = None
example_class = tf.train.Example
# The standard name for encoded image features in TensorFlow.
image_feature_name = 'image/encoded'
# The width and height of the thumbnail for any images for Facets Dive.
sprite_thumbnail_dim_px = 32
# The vocab of inference class indices to label names for the model.
label_vocab = []
def __init__(self, context):
"""Constructs an interactive inference plugin for TensorBoard.
Args:
context: A base_plugin.TBContext instance.
"""
self._logdir = context.logdir
self._wit_data_dir = context.flags.wit_data_dir if context.flags else None
self.custom_predict_fn = None
if context.flags and context.flags.custom_predict_fn:
try:
import importlib.util as iu
spec = iu.spec_from_file_location("custom_predict_fn", context.flags.custom_predict_fn)
module = iu.module_from_spec(spec)
spec.loader.exec_module(module)
self.custom_predict_fn = module.custom_predict_fn
logger.info("custom_predict_fn loaded.")
except Exception as e:
logger.error(str(e))
logger.error("Failed to load the custom predict function.")
logger.error("Have you defined a function named custom_predict_fn?")
def get_plugin_apps(self):
"""Obtains a mapping between routes and handlers. Stores the logdir.
Returns:
A mapping between routes and handlers (functions that respond to
requests).
"""
return {
'/index.js': self._serve_js,
'/wit_tb_bin.html': self._serve_wit,
'/wit_tb_bin.js': self._serve_wit_js,
'/infer': self._infer,
'/update_example': self._update_example,
'/examples_from_path': self._examples_from_path_handler,
'/sprite': self._serve_sprite,
'/duplicate_example': self._duplicate_example,
'/delete_example': self._delete_example,
'/infer_mutants': self._infer_mutants_handler,
'/eligible_features': self._eligible_features_from_example_handler,
'/sort_eligible_features': self._sort_eligible_features_handler,
}
def is_active(self):
"""Determines whether this plugin is active.
Returns:
A boolean. Whether this plugin is active.
"""
# TODO(jameswex): Maybe enable if config flags were specified?
return False
def frontend_metadata(self):
return base_plugin.FrontendMetadata(
es_module_path="/index.js",
tab_name='What-If Tool')
@wrappers.Request.application
def _serve_js(self, request):
del request # unused
filepath = os.path.join(os.path.dirname(__file__), "static", "index.js")
with io.open(filepath, encoding='utf-8') as infile:
contents = infile.read()
return werkzeug.Response(contents, content_type="application/javascript")
@wrappers.Request.application
def _serve_wit(self, request):
del request # unused
filepath = os.path.join(os.path.dirname(__file__), "static", "wit_tb_bin.html")
with io.open(filepath, encoding='utf-8') as infile:
contents = infile.read()
return werkzeug.Response(contents, content_type="text/html")
@wrappers.Request.application
def _serve_wit_js(self, request):
del request # unused
filepath = os.path.join(os.path.dirname(__file__), "static", "wit_tb_bin.js")
with io.open(filepath, encoding='utf-8') as infile:
contents = infile.read()
return werkzeug.Response(contents, content_type="application/javascript")
def generate_sprite(self, example_strings):
# Generate a sprite image for the examples if the examples contain the
# standard encoded image feature.
feature_list = (self.examples[0].features.feature
if self.example_class == tf.train.Example
else self.examples[0].context.feature)
self.sprite = (
inference_utils.create_sprite_image(example_strings)
if (len(self.examples) and self.image_feature_name in feature_list) else
None)
@wrappers.Request.application
def _examples_from_path_handler(self, request):
"""Returns JSON of the specified examples.
Args:
request: A request that should contain 'examples_path' and 'max_examples'.
Returns:
      JSON of up to max_examples of the examples in the path.
"""
examples_count = int(request.args.get('max_examples'))
examples_path = request.args.get('examples_path')
sampling_odds = float(request.args.get('sampling_odds'))
self.example_class = (tf.train.SequenceExample
if request.args.get('sequence_examples') == 'true'
else tf.train.Example)
try:
platform_utils.throw_if_file_access_not_allowed(examples_path,
self._logdir,
self._wit_data_dir)
example_strings = platform_utils.example_protos_from_path(
examples_path, examples_count, parse_examples=False,
sampling_odds=sampling_odds, example_class=self.example_class)
self.examples = [
self.example_class.FromString(ex) for ex in example_strings]
self.generate_sprite(example_strings)
json_examples = [
json_format.MessageToJson(example) for example in self.examples
]
self.updated_example_indices = set(range(len(json_examples)))
return http_util.Respond(
request,
{'examples': json_examples,
'sprite': True if self.sprite else False}, 'application/json')
except common_utils.InvalidUserInputError as e:
logger.error('Data loading error: %s', e.message)
return http_util.Respond(request, e.message,
'application/json', code=400)
except Exception as e:
return http_util.Respond(request, str(e),
'application/json', code=400)
@wrappers.Request.application
def _serve_sprite(self, request):
return http_util.Respond(request, self.sprite, 'image/png')
@wrappers.Request.application
def _update_example(self, request):
"""Updates the specified example.
Args:
request: A request that should contain 'index' and 'example'.
Returns:
An empty response.
"""
if request.method != 'POST':
return http_util.Respond(request, 'invalid non-POST request',
'application/json', code=405)
example_json = request.form['example']
index = int(request.form['index'])
if index >= len(self.examples):
return http_util.Respond(request, 'invalid index provided',
'application/json', code=400)
new_example = self.example_class()
json_format.Parse(example_json, new_example)
self.examples[index] = new_example
self.updated_example_indices.add(index)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
@wrappers.Request.application
def _duplicate_example(self, request):
"""Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, 'invalid index provided',
'application/json', code=400)
new_example = self.example_class()
new_example.CopyFrom(self.examples[index])
self.examples.append(new_example)
self.updated_example_indices.add(len(self.examples) - 1)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
@wrappers.Request.application
def _delete_example(self, request):
"""Deletes the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, 'invalid index provided',
'application/json', code=400)
del self.examples[index]
self.updated_example_indices = set([
i if i < index else i - 1 for i in self.updated_example_indices])
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json')
def _parse_request_arguments(self, request):
"""Parses comma separated request arguments
Args:
request: A request that should contain 'inference_address', 'model_name',
'model_version', 'model_signature'.
Returns:
A tuple of lists for model parameters
"""
inference_addresses = request.args.get('inference_address').split(',')
model_names = request.args.get('model_name').split(',')
model_versions = request.args.get('model_version').split(',')
model_signatures = request.args.get('model_signature').split(',')
if len(model_names) != len(inference_addresses):
raise common_utils.InvalidUserInputError('Every model should have a ' +
'name and address.')
return inference_addresses, model_names, model_versions, model_signatures
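  # A hedged illustration of the query string this parser expects (values are
  # hypothetical, not taken from this repo): a request such as
  #   ?inference_address=localhost:8500,localhost:8501&model_name=m1,m2
  #    &model_version=1,2&model_signature=serving_default,serving_default
  # yields (['localhost:8500', 'localhost:8501'], ['m1', 'm2'], ['1', '2'],
  # ['serving_default', 'serving_default']); mismatched name/address list
  # lengths raise InvalidUserInputError.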
@wrappers.Request.application
def _infer(self, request):
"""Returns JSON for the `vz-line-chart`s for a feature.
Args:
request: A request that should contain 'inference_address', 'model_name',
      'model_type', 'model_version', 'model_signature' and 'label_vocab_path'.
Returns:
A list of JSON objects, one for each chart.
"""
label_vocab = inference_utils.get_label_vocab(
request.args.get('label_vocab_path'))
try:
if request.method != 'GET':
logger.error('%s requests are forbidden.', request.method)
return http_util.Respond(request, 'invalid non-GET request',
'application/json', code=405)
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
indices_to_infer = sorted(self.updated_example_indices)
examples_to_infer = [self.examples[index] for index in indices_to_infer]
infer_objs = []
for model_num in xrange(len(inference_addresses)):
serving_bundle = inference_utils.ServingBundle(
inference_addresses[model_num],
model_names[model_num],
request.args.get('model_type'),
model_versions[model_num],
model_signatures[model_num],
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'),
custom_predict_fn=self.custom_predict_fn)
(predictions, _) = inference_utils.run_inference_for_inference_results(
examples_to_infer, serving_bundle)
infer_objs.append(predictions)
resp = {'indices': indices_to_infer, 'results': infer_objs}
self.updated_example_indices = set()
return http_util.Respond(request, {'inferences': json.dumps(resp),
'vocab': json.dumps(label_vocab)},
'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, e.message,
'application/json', code=400)
except AbortionError as e:
return http_util.Respond(request, e.details,
'application/json', code=400)
except Exception as e:
return http_util.Respond(request, str(e),
'application/json', code=400)
@wrappers.Request.application
def _eligible_features_from_example_handler(self, request):
"""Returns a list of JSON objects for each feature in the example.
Args:
request: A request for features.
Returns:
A list with a JSON object for each feature.
Numeric features are represented as {name: observedMin: observedMax:}.
      Categorical features are represented as {name: samples:[]}.
"""
features_list = inference_utils.get_eligible_features(
self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
return http_util.Respond(request, features_list, 'application/json')
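  # Hedged sketch of the per-feature JSON shape returned above (feature names
  # and values are hypothetical):
  #   numeric:     {"name": "age", "observedMin": 17, "observedMax": 90}
  #   categorical: {"name": "occupation", "samples": ["clerk", "teacher"]}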
@wrappers.Request.application
def _sort_eligible_features_handler(self, request):
"""Returns a sorted list of JSON objects for each feature in the example.
The list is sorted by interestingness in terms of the resulting change in
inference values across feature values, for partial dependence plots.
Args:
request: A request for sorted features.
Returns:
A sorted list with a JSON object for each feature.
Numeric features are represented as
{name: observedMin: observedMax: interestingness:}.
      Categorical features are represented as
{name: samples:[] interestingness:}.
"""
try:
features_list = inference_utils.get_eligible_features(
self.examples[0: NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS)
example_index = int(request.args.get('example_index', '0'))
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
chart_data = {}
for feat in features_list:
chart_data[feat['name']] = self._infer_mutants_impl(
feat['name'], example_index,
inference_addresses, model_names, request.args.get('model_type'),
model_versions, model_signatures,
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'),
feat['observedMin'] if 'observedMin' in feat else 0,
            feat['observedMax'] if 'observedMax' in feat else 0,
None, custom_predict_fn=self.custom_predict_fn)
features_list = inference_utils.sort_eligible_features(
features_list, chart_data)
return http_util.Respond(request, features_list, 'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, e.message,
'application/json', code=400)
except Exception as e:
return http_util.Respond(request, str(e),
'application/json', code=400)
@wrappers.Request.application
def _infer_mutants_handler(self, request):
"""Returns JSON for the partial dependence plots for a feature.
Args:
request: A request that should contain 'feature_name', 'example_index',
'inference_address', 'model_name', 'model_type', 'model_version', and
'model_signature'.
Returns:
A list of JSON objects, one for each chart.
"""
try:
if request.method != 'GET':
logger.error('%s requests are forbidden.', request.method)
return http_util.Respond(request, 'invalid non-GET request',
'application/json', code=405)
example_index = int(request.args.get('example_index', '0'))
feature_name = request.args.get('feature_name')
(inference_addresses, model_names, model_versions,
model_signatures) = self._parse_request_arguments(request)
json_mapping = self._infer_mutants_impl(feature_name, example_index,
inference_addresses, model_names, request.args.get('model_type'),
model_versions, model_signatures,
request.args.get('use_predict') == 'true',
request.args.get('predict_input_tensor'),
request.args.get('predict_output_tensor'),
request.args.get('x_min'), request.args.get('x_max'),
request.args.get('feature_index_pattern'),
custom_predict_fn=self.custom_predict_fn)
return http_util.Respond(request, json_mapping, 'application/json')
except common_utils.InvalidUserInputError as e:
return http_util.Respond(request, e.message,
'application/json', code=400)
except Exception as e:
return http_util.Respond(request, str(e),
'application/json', code=400)
def _infer_mutants_impl(self, feature_name, example_index, inference_addresses,
model_names, model_type, model_versions, model_signatures, use_predict,
predict_input_tensor, predict_output_tensor, x_min, x_max,
feature_index_pattern, custom_predict_fn):
"""Helper for generating PD plots for a feature."""
examples = (self.examples if example_index == -1
else [self.examples[example_index]])
serving_bundles = []
for model_num in xrange(len(inference_addresses)):
serving_bundles.append(inference_utils.ServingBundle(
inference_addresses[model_num],
model_names[model_num],
model_type,
model_versions[model_num],
model_signatures[model_num],
use_predict,
predict_input_tensor,
predict_output_tensor,
custom_predict_fn=custom_predict_fn))
viz_params = inference_utils.VizParams(
x_min, x_max,
self.examples[0:NUM_EXAMPLES_TO_SCAN], NUM_MUTANTS,
feature_index_pattern)
return inference_utils.mutant_charts_for_feature(
examples, feature_name, serving_bundles, viz_params)
| pair-code/what-if-tool | tensorboard_plugin_wit/wit_plugin.py | Python | apache-2.0 | 19,439 |
class TestOpenIDRelyingPartyMiddleware(object):
'''Test Application for the Authentication handler to protect'''
response = "Test Authentication redirect application"
def __init__(self, app_conf, **local_conf):
self.beakerSessionKeyName = app_conf['beakerSessionKeyName']
def __call__(self, environ, start_response):
username = environ[self.beakerSessionKeyName].get('username')
if username:
response = """<html>
<head/>
<body>
<p>Authenticated!</p>
<p><a href="/logout">logout</a></p>
</body>
</html>"""
start_response('200 OK',
[('Content-type', 'text/html'),
('Content-length', str(len(response)))])
else:
response = "Trigger OpenID Relying Party..."
start_response('401 Unauthorized',
[('Content-type', 'text/plain'),
('Content-length', str(len(response)))])
return [response]
# To start run
# $ paster serve services.ini or run this file as a script
# $ ./securedapp.py [port #]
if __name__ == '__main__':
import sys
import os
from os.path import dirname, abspath
import logging
logging.basicConfig(level=logging.DEBUG)
if len(sys.argv) > 1:
port = int(sys.argv[1])
else:
port = 6080
cfgFilePath = os.path.join(dirname(abspath(__file__)), 'securedapp.ini')
from paste.httpserver import serve
from paste.deploy import loadapp
from paste.script.util.logging_config import fileConfig
fileConfig(cfgFilePath)
app = loadapp('config:%s' % cfgFilePath)
serve(app, host='0.0.0.0', port=port) | philipkershaw/ndg_security_server | ndg/security/server/test/integration/openidrp_and_sslauthn/securedapp.py | Python | bsd-3-clause | 1,756 |
from django import template
from django.utils.translation import ugettext
register = template.Library()
@register.filter
def get_item(d, key):
return d.get(key, None)
@register.filter(name='translate')
def translate(text):
return ugettext(text)
| yourlabs/django-permissions-widget | permissions_widget/templatetags/permissions_widget_tags.py | Python | mit | 258 |
import numpy as np
import tensorflow as tf
def _xavier_init(fan_in, fan_out, constant=1):
"""
Xavier initialization of network weights.
For some explanations see:
- http://andyljones.tumblr.com/post/110998971763/an-explanation-of-xavier-initialization
- http://jmlr.org/proceedings/papers/v9/glorot10a/glorot10a.pdf
- https://stackoverflow.com/questions/33640581/how-to-do-xavier-initialization-on-tensorflow
"""
low = -constant * np.sqrt(6.0 / (fan_in + fan_out))
high = constant * np.sqrt(6.0 / (fan_in + fan_out))
return tf.random_uniform((fan_in, fan_out),
minval=low, maxval=high,
dtype=tf.float32)
class SimpleAutoencoder(object):
"""
Autoencoder implementation based on inputs from https://jmetzen.github.io/2015-11-27/vae.html.
See the project README on how to use it.
"""
def __init__(self,
network_architecture,
transfer_fct=tf.nn.tanh,
batch_size=100,
weight_regularization=0.0,
bias_regularization=0.0,
tied=False):
self.network_architecture = network_architecture
self.transfer_fct = transfer_fct
self.batch_size = batch_size
self.weight_regularization = weight_regularization
self.bias_regularization = bias_regularization
self.tied = tied
self._encoder_layers = []
self._decoder_layers = []
self._recording = {'epoch': 0,
'learning_rate_schedule': dict(),
'batch_cost': []}
# tf Graph input
self.learning_rate = tf.placeholder(tf.float32, shape=[])
self.x = tf.placeholder(tf.float32, [None, network_architecture["n_input"]])
self._create_network()
# Define loss function based variational upper-bound and
# corresponding optimizer
self._create_loss_optimizer()
# Initializing the tensor flow variables
init = tf.initialize_all_variables()
# Launch the session
num_cores = 6
self.sess = tf.InteractiveSession(config=tf.ConfigProto(device_count={'CPU': num_cores},
inter_op_parallelism_threads=num_cores,
intra_op_parallelism_threads=num_cores))
self.sess.run(init)
def _create_network(self):
# Initialize autoencoder network weights and biases
self.network_weights = self._initialize_weights(self.network_architecture, self.tied)
if self.tied:
self.x_encoded, self.x_decoded = self._tied_encoder_network(self.network_weights["weights_encoder"],
self.network_weights["biases_encoder"],
self.network_weights["biases_decoder"])
else:
assert 'decoder' in self.network_architecture.keys(), "Need to provide decoder network layer configuration for untied encoder."
self.x_encoded = self._encoder_network(self.network_weights["weights_encoder"],
self.network_weights["biases_encoder"])
self.x_decoded = self._decoder_network(self.network_weights["weights_decoder"],
self.network_weights["biases_decoder"])
@staticmethod
def _create_matrix_and_bias_sizes(n_input, lst_of_encoder_layer_sizes, n_compressed):
matrix_dims = zip([n_input] + lst_of_encoder_layer_sizes, lst_of_encoder_layer_sizes + [n_compressed])
bias_dims = [tup[1] for tup in matrix_dims]
return matrix_dims, bias_dims
@staticmethod
def _create_layers(component, lst_of_component_layer_sizes, n_input, n_compressed):
all_weights = dict()
if len(lst_of_component_layer_sizes) > 0:
matrix_dims, bias_dims = SimpleAutoencoder._create_matrix_and_bias_sizes(n_input,
lst_of_component_layer_sizes,
n_compressed)
all_weights['weights_{}'.format(component)] = dict()
all_weights['biases_{}'.format(component)] = dict()
for idx, dim in enumerate(matrix_dims):
print "{} layer {}, dimensionality {} -> {}".format(component, idx + 1, dim[0], dim[1])
all_weights['weights_{}'.format(component)].update(
{'h{}'.format(idx + 1): tf.Variable(_xavier_init(dim[0], dim[1]))})
all_weights['biases_{}'.format(component)].update(
{'b{}'.format(idx + 1): tf.Variable(tf.zeros([bias_dims[idx]], dtype=tf.float32))})
return all_weights
else:
raise AttributeError("need list of {} layer sizes".format(component))
@staticmethod
def _initialize_weights(architecture_dict, tied):
all_weights = dict()
n_input = architecture_dict.get('n_input')
n_compressed = architecture_dict.get('n_compressed')
all_weights.update(SimpleAutoencoder._create_layers('encoder', architecture_dict.get('encoder'), n_input, n_compressed))
if tied:
lst_of_dims = architecture_dict.get('encoder')
# List `reverse` manipulates IN-PLACE and returns None. Hence we cannot put this inline :(
lst_of_dims.reverse()
all_weights.update(SimpleAutoencoder._create_layers('decoder', lst_of_dims, n_compressed, n_input))
else:
assert 'decoder' in architecture_dict.keys(), "Need to provide decoder network layer configuration for untied encoder."
all_weights.update(SimpleAutoencoder._create_layers('decoder', architecture_dict.get('decoder'), n_compressed, n_input))
return all_weights
def _tied_encoder_network(self, weights, biases_encoder, biases_decoder):
layers = []
for idx in range(len(weights)):
if idx == 0:
layers.append(self.transfer_fct(
tf.add(tf.matmul(self.x, weights['h{}'.format(idx + 1)]), biases_encoder['b{}'.format(idx + 1)])))
else:
layers.append(self.transfer_fct(
tf.add(tf.matmul(layers[idx - 1], weights['h{}'.format(idx + 1)]), biases_encoder['b{}'.format(idx + 1)])))
for idx in range(len(weights)):
if idx == 0:
layers.append(self.transfer_fct(
tf.add(tf.matmul(layers[-1], weights['h{}'.format(len(weights) - idx)], transpose_b=True), biases_decoder['b{}'.format(idx + 1)])))
else:
layers.append(self.transfer_fct(
tf.add(tf.matmul(layers[len(weights) + idx -1], weights['h{}'.format(len(weights) - idx)], transpose_b=True),
biases_decoder['b{}'.format(idx + 1)])))
self._encoder_layers = layers[:len(weights)]
self._decoder_layers = layers[len(weights):]
return layers[len(weights) - 1], layers[-1]
def _encoder_network(self, weights, biases):
layers = []
for idx in range(len(weights)):
if idx == 0:
layers.append(self.transfer_fct(tf.add(tf.matmul(self.x, weights['h{}'.format(idx + 1)]), biases['b{}'.format(idx +1)])))
else:
layers.append(self.transfer_fct(tf.add(tf.matmul(layers[idx-1], weights['h{}'.format(idx + 1)]), biases['b{}'.format(idx +1)])))
self._encoder_layers = layers
return layers[-1]
def _decoder_network(self, weights, biases):
layers = []
for idx in range(len(weights)):
if idx == 0:
layers.append(self.transfer_fct(
tf.add(tf.matmul(self.x_encoded, weights['h{}'.format(idx + 1)]), biases['b{}'.format(idx + 1)])))
else:
layers.append(self.transfer_fct(
tf.add(tf.matmul(layers[idx - 1], weights['h{}'.format(idx + 1)]), biases['b{}'.format(idx + 1)])))
self._decoder_layers = layers
return layers[-1]
def _create_loss_optimizer(self):
# The reconstruction loss
reconstr_loss = tf.reduce_sum(tf.squared_difference(self.x, self.x_decoded))
# Weight matrix regularization loss
weight_reg = 0
for val in self.network_weights['weights_encoder'].itervalues():
weight_reg += tf.reduce_sum(tf.square(val))
for val in self.network_weights['weights_decoder'].itervalues():
weight_reg += tf.reduce_sum(tf.square(val))
# Bias vector regularization loss
bias_reg = 0
for val in self.network_weights['biases_encoder'].itervalues():
bias_reg += tf.reduce_sum(tf.square(val))
for val in self.network_weights['biases_decoder'].itervalues():
bias_reg += tf.reduce_sum(tf.square(val))
self.cost = tf.reduce_mean(reconstr_loss) + (self.weight_regularization * weight_reg + self.bias_regularization * bias_reg) / (2 * self.batch_size)
self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.learning_rate).minimize(self.cost)
def partial_fit(self, X, learning_rate=None):
"""Train model based on mini-batch of input data.
Return cost of mini-batch.
"""
        if not learning_rate:
            # Default learning rate; keep the self.learning_rate placeholder
            # intact since it is used as the feed_dict key below.
            learning_rate = 0.001
opt, cost = self.sess.run((self.optimizer, self.cost),
feed_dict={self.x: X, self.learning_rate: learning_rate})
return cost
def encode(self, X):
"""Transform data by mapping it into the latent space."""
return self.sess.run(self.x_encoded, feed_dict={self.x: X})
def decode(self, X):
return self.sess.run(self.x_decoded, feed_dict={self.x: X})
def _monitor_layer(self, X, layer_index, network='encoder'):
if network == 'encoder':
layer_to_run = self._encoder_layers[layer_index]
elif network == 'decoder':
layer_to_run = self._decoder_layers[layer_index]
else:
raise AttributeError("network '{}' does not exist".format(network))
return self.sess.run(layer_to_run, feed_dict={self.x: X})
def _update_learning_rate(self, dct, epoch):
epoch_key = max(k for k in dct if k <= epoch)
if self._recording['epoch'] <= 1:
self._current_lr = dct[epoch_key]
self._recording['learning_rate_schedule'].update({self._recording['epoch'] - 1: self._current_lr})
if dct[epoch_key] != self._current_lr:
self._current_lr = dct[epoch_key]
self._recording['learning_rate_schedule'].update({self._recording['epoch'] - 1: self._current_lr})
return self._current_lr
def train(self, X, n_epochs, learning_rate=0.0001, display_step=10):
data = DataSet(X, self.batch_size)
for epoch in range(n_epochs):
self._recording['epoch'] += 1
if isinstance(learning_rate, dict):
lr = self._update_learning_rate(learning_rate, epoch)
else:
lr = learning_rate
data.reset_counter()
costs = []
while data.has_next():
batch_xs = data.next_batch()
costs.append(self.partial_fit(batch_xs, lr))
self._recording['batch_cost'].append(np.mean(costs))
# Display logs per epoch step
if (epoch+1) % display_step == 0:
print "Epoch:", '{0:04d} / {1:04d}'.format(epoch + 1, n_epochs), \
"cost=", "{:.9f}".format(self._recording['batch_cost'][-1])
class DataSet(object):
def __init__(self, data, batch_size = 100):
self.data = data
self.n_samples = self.data.shape[0]
self.n_dimensions = self.data.shape[1]
self.batch_size = batch_size
self.total_batches = int(self.n_samples / self.batch_size)
self.current_batch = 0
def next_batch(self):
batch_lower_idx = self.current_batch * self.batch_size
batch_upper_idx = (self.current_batch + 1) * self.batch_size
self.current_batch += 1
return self.data[batch_lower_idx:batch_upper_idx, :]
def has_next(self):
return self.current_batch < self.total_batches
def reset_counter(self):
# Shuffle data set on re-initialization
# Note that shuffle does this IN-PLACE. Sad :(
np.random.shuffle(self.data)
self.current_batch = 0
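# Minimal usage sketch for the classes above. The architecture dict, layer
# sizes, learning-rate schedule and random data are illustrative assumptions,
# not values taken from this project.
if __name__ == '__main__':
    example_data = np.random.rand(1000, 20).astype(np.float32)
    architecture = {'n_input': 20,       # dimensionality of the input vectors
                    'n_compressed': 2,   # size of the latent code
                    'encoder': [10, 5],  # hidden layer sizes of the encoder
                    'decoder': [5, 10]}  # hidden layer sizes of the decoder
    autoencoder = SimpleAutoencoder(architecture, batch_size=100, tied=False)
    # learning_rate may also be a plain float; a dict maps epoch -> rate.
    autoencoder.train(example_data, n_epochs=10,
                      learning_rate={0: 0.01, 5: 0.001}, display_step=5)
    codes = autoencoder.encode(example_data[:10])
    reconstructions = autoencoder.decode(example_data[:10])
    print "latent codes shape:", codes.shape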
| jotterbach/dstk | DSTK/AutoEncoder/autoencoder.py | Python | mit | 12,799 |
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest_lib.common.utils import data_utils
from tempest_lib import exceptions as lib_exc
from tempest.api.network import base_routers as base
from tempest import config
from tempest import test
CONF = config.CONF
class RoutersNegativeTest(base.BaseRouterTest):
@classmethod
def skip_checks(cls):
super(RoutersNegativeTest, cls).skip_checks()
if not test.is_extension_enabled('router', 'network'):
msg = "router extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(RoutersNegativeTest, cls).resource_setup()
cls.router = cls.create_router(data_utils.rand_name('router-'))
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.tenant_cidr = (CONF.network.tenant_network_cidr
if cls._ip_version == 4 else
CONF.network.tenant_network_v6_cidr)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('37a94fc0-a834-45b9-bd23-9a81d2fd1e22')
def test_router_add_gateway_invalid_network_returns_404(self):
self.assertRaises(lib_exc.NotFound,
self.client.update_router,
self.router['id'],
external_gateway_info={
'network_id': self.router['id']})
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('11836a18-0b15-4327-a50b-f0d9dc66bddd')
def test_router_add_gateway_net_not_external_returns_400(self):
alt_network = self.create_network(
network_name=data_utils.rand_name('router-negative-'))
sub_cidr = netaddr.IPNetwork(self.tenant_cidr).next()
self.create_subnet(alt_network, cidr=sub_cidr)
self.assertRaises(lib_exc.BadRequest,
self.client.update_router,
self.router['id'],
external_gateway_info={
'network_id': alt_network['id']})
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('957751a3-3c68-4fa2-93b6-eb52ea10db6e')
def test_add_router_interfaces_on_overlapping_subnets_returns_400(self):
network01 = self.create_network(
network_name=data_utils.rand_name('router-network01-'))
network02 = self.create_network(
network_name=data_utils.rand_name('router-network02-'))
subnet01 = self.create_subnet(network01)
subnet02 = self.create_subnet(network02)
self._add_router_interface_with_subnet_id(self.router['id'],
subnet01['id'])
self.assertRaises(lib_exc.BadRequest,
self._add_router_interface_with_subnet_id,
self.router['id'],
subnet02['id'])
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('04df80f9-224d-47f5-837a-bf23e33d1c20')
def test_router_remove_interface_in_use_returns_409(self):
self.client.add_router_interface_with_subnet_id(
self.router['id'], self.subnet['id'])
self.assertRaises(lib_exc.Conflict,
self.client.delete_router,
self.router['id'])
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('c2a70d72-8826-43a7-8208-0209e6360c47')
def test_show_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.show_router,
router)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('b23d1569-8b0c-4169-8d4b-6abd34fad5c7')
def test_update_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.update_router,
router, name="new_name")
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('c7edc5ad-d09d-41e6-a344-5c0c31e2e3e4')
def test_delete_non_existent_router_returns_404(self):
router = data_utils.rand_name('non_exist_router')
self.assertRaises(lib_exc.NotFound, self.client.delete_router,
router)
class RoutersNegativeIpV6Test(RoutersNegativeTest):
_ip_version = 6
class DvrRoutersNegativeTest(base.BaseRouterTest):
@classmethod
def skip_checks(cls):
super(DvrRoutersNegativeTest, cls).skip_checks()
if not test.is_extension_enabled('dvr', 'network'):
msg = "DVR extension not enabled."
raise cls.skipException(msg)
@classmethod
def resource_setup(cls):
super(DvrRoutersNegativeTest, cls).resource_setup()
cls.router = cls.create_router(data_utils.rand_name('router'))
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
@test.attr(type=['negative', 'smoke'])
@test.idempotent_id('4990b055-8fc7-48ab-bba7-aa28beaad0b9')
def test_router_create_tenant_distributed_returns_forbidden(self):
self.assertRaises(lib_exc.Forbidden,
self.create_router,
data_utils.rand_name('router'),
distributed=True)
| fengbeihong/tempest_automate_ironic | tempest/api/network/test_routers_negative.py | Python | apache-2.0 | 5,998 |
from django.contrib import admin
from .models import Batch, Event, PreClaim, Claim, Department, Student
from django.contrib.admin.models import LogEntry
# Register your models here.
def js_approve(modeladmin, request, queryset):
queryset.update(js_approved = True)
js_approve.short_description = 'Joint Sec Approves'
def dean_approve(modeladmin,request, queryset):
queryset.update(dean_approved = True)
dean_approve.short_description = 'Dean Approves'
def sis_approve(modeladmin,request, queryset):
queryset.update(sis_approved = True)
sis_approve.short_description = 'SIS Approves'
@admin.register(PreClaim)
class PreClaimAdmin(admin.ModelAdmin):
exclude = ('students','dean_approved')
list_display = ('event','dean_approved')
actions = [dean_approve,]
def get_actions(self,request):
actions = super(PreClaimAdmin, self).get_actions(request)
if not request.user.has_perm('attendance.preclaim_dean_approve'):
del actions['dean_approve']
return actions
@admin.register(Claim)
class ClaimAdmin(admin.ModelAdmin):
exclude = ('sis_approved',)
list_display = ('student','name','period','date','event','pre_claim_approved','js_approved','sis_approved')
actions = [js_approve,sis_approve]
search_fields = ['student__name','student__roll_no','period__department__name']
def get_actions(self,request):
actions = super(ClaimAdmin, self).get_actions(request)
if not request.user.has_perm('attendance.claim_js_approve'):
del actions['js_approve']
if not request.user.has_perm('attendance.claim_sis_approve'):
del actions['sis_approve']
return actions
admin.site.register(Batch)
admin.site.register(Event)
admin.site.register(Department)
#admin.site.register(Student)
admin.site.register(LogEntry)
admin.site.site_header = "KMC Office"
admin.site.title = "KMC Office"
admin.site.index_title = ""
| tornadoalert/kmcoffice | attendance/admin.py | Python | gpl-3.0 | 1,936 |
from __future__ import absolute_import
import unittest
from variantmethod import variantmethod
class Testvariantmethod(unittest.TestCase):
def setUp(self):
class MyClass(object):
def __init__(self):
pass
@variantmethod
def variant(self, first):
return "self: {0}, first: {1}".format(type(self).__name__, first)
self.MyClass = MyClass
def test_classmethod(self):
self.assertEqual(
self.MyClass.variant('foo'), "self: type, first: foo"
)
def test_instancemethod(self):
inst = self.MyClass()
self.assertEqual(
inst.variant('bar'), "self: MyClass, first: bar"
)
inst.variant = 'things'
self.assertEqual(inst.variant, 'things')
if __name__ == "__main__":
unittest.main()
| OaklandPeters/clsdescriptor | clsdescriptor/test_variantmethod.py | Python | mit | 853 |
from optparse import make_option
from urlparse import urlparse
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from treeherder.client import PerfherderClient, PerformanceTimeInterval
from treeherder.perfalert import PerfDatum, TalosAnalyzer
class Command(BaseCommand):
help = """
Test running performance alert subsystem on a series
"""
option_list = BaseCommand.option_list + (
make_option('--server',
action='store',
dest='server',
default=None,
help='Server to get data from, default to local instance'),
make_option('--time-interval',
action='store',
default=PerformanceTimeInterval.WEEK,
type='int',
help='Time interval to test alert code on (defaults to one week)'),
make_option('--project',
action='append',
                    help='Project to get signatures from (specify multiple times to get multiple projects)'),
make_option('--signature',
action='store',
help='Signature hash to process, defaults to all summary series')
)
@staticmethod
def _get_series_description(option_collection_hash, series_properties):
testname = series_properties.get('test', 'summary')
option_hash_strs = [o['name'] for o in option_collection_hash[
series_properties['option_collection_hash']]]
test_options = (series_properties.get('test_options', []) +
option_hash_strs)
return " ".join([str(s) for s in [series_properties['suite'],
testname] + test_options])
def handle(self, *args, **options):
if options['server']:
server_params = urlparse(options['server'])
server_protocol = server_params.scheme
server_host = server_params.netloc
else:
server_protocol = settings.TREEHERDER_REQUEST_PROTOCOL
server_host = settings.TREEHERDER_REQUEST_HOST
if not options['project']:
raise CommandError("Must specify at least one project with "
"--project")
pc = PerfherderClient(protocol=server_protocol,
host=server_host)
option_collection_hash = pc.get_option_collection_hash()
# print csv header
print ','.join(["project", "platform", "signature", "series",
"testrun_id", "push_timestamp", "change",
"percent change", "t-value", "revision"])
for project in options['project']:
if options['signature']:
signatures = [options['signature']]
signature_data = {}
else:
signature_data = pc.get_performance_signatures(
project, time_interval=options['time_interval'])
signatures = []
# if doing everything, only handle summary series
for (signature, properties) in signature_data.iteritems():
if 'subtest_signatures' in properties:
signatures.append(signature)
for signature in signatures:
series = pc.get_performance_series(
project, signature,
time_interval=options['time_interval'])
series_properties = signature_data.get(signature)
if not series_properties:
series_properties = pc.get_performance_signature_properties(
project, signature)
if series_properties.get('subtest_signatures') is not None:
meanvar = 'geomean'
else:
meanvar = 'mean'
perf_data = []
for (result_set_id, timestamp, mean) in zip(
series['result_set_id'], series['push_timestamp'],
series[meanvar]):
perf_data.append(PerfDatum(timestamp, mean, testrun_id=result_set_id))
ta = TalosAnalyzer()
ta.addData(perf_data)
for r in ta.analyze_t():
if r.state == 'regression':
resultsets = pc.get_resultsets(project,
id=r.testrun_id)
if len(resultsets):
revision = resultsets[0]['revision']
else:
revision = ''
initial_value = r.historical_stats['avg']
new_value = r.forward_stats['avg']
if initial_value != 0:
pct_change = 100.0 * abs(new_value - initial_value) / float(initial_value)
else:
pct_change = 0.0
delta = (new_value - initial_value)
print ','.join(map(
lambda v: str(v),
[project, series_properties['machine_platform'],
signature, self._get_series_description(
option_collection_hash,
series_properties),
r.testrun_id, r.push_timestamp, delta,
pct_change, r.t, revision[0:12]]))
| vaishalitekale/treeherder | treeherder/model/management/commands/test_analyze_perf.py | Python | mpl-2.0 | 5,596 |
"""
I came up with this on the first try, so that's why this is posted in duplicate.
"""
import sys
try:
columns = int(input("How many columns? "))
rows = int(input("How many rows? "))
tall = int(input("How tall should the boxes be? "))
wide = int(input("How wide should the boxes be? "))
except Exception as e:
print(e)
print("You have fail")
print("Try type valid integer")
sys.exit(1)
i = 0
j = 0
k = 0
m = 0
while j <= rows:
print("+",end="")
while k < columns:
while i < wide:
print("-",end="")
i += 1
print("+",end="")
i = 0
k += 1
print('\r')
k = 0
if j < rows:
while m < tall:
print("|",end="")
while k < columns:
print(" "*wide,end="")
print("|",end="")
k += 1
k = 0
m += 1
print("\r")
m = 0
j += 1
sys.exit(0)
| MattD830/Python-INFO1-CE9990 | graphpaper2.py | Python | gpl-3.0 | 968 |
from sqlalchemy.dialects.mssql.pyodbc import MSDialect_pyodbc
class BigQueryDialect_pyodbc(MSDialect_pyodbc):
pass
| cpdean/sqlalchemy-bigquery | sqlalchemy_bigquery/pyodbc.py | Python | mit | 121 |
# -*- coding: utf-8 -*-
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility module for translating XML API objects to/from JSON objects."""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import datetime
import json
import re
import textwrap
import xml.etree.ElementTree
from xml.etree.ElementTree import ParseError as XmlParseError
import six
from apitools.base.py import encoding
import boto
from boto.gs.acl import ACL
from boto.gs.acl import ALL_AUTHENTICATED_USERS
from boto.gs.acl import ALL_USERS
from boto.gs.acl import Entries
from boto.gs.acl import Entry
from boto.gs.acl import GROUP_BY_DOMAIN
from boto.gs.acl import GROUP_BY_EMAIL
from boto.gs.acl import GROUP_BY_ID
from boto.gs.acl import USER_BY_EMAIL
from boto.gs.acl import USER_BY_ID
from boto.s3.tagging import Tags
from boto.s3.tagging import TagSet
from gslib.cloud_api import ArgumentException
from gslib.cloud_api import BucketNotFoundException
from gslib.cloud_api import NotFoundException
from gslib.cloud_api import Preconditions
from gslib.exception import CommandException
from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages
from gslib.utils.constants import S3_ACL_MARKER_GUID
from gslib.utils.constants import S3_MARKER_GUIDS
if six.PY3:
long = int
CACHE_CONTROL_REGEX = re.compile(r'^cache-control', re.I)
CONTENT_DISPOSITION_REGEX = re.compile(r'^content-disposition', re.I)
CONTENT_ENCODING_REGEX = re.compile(r'^content-encoding', re.I)
CONTENT_LANGUAGE_REGEX = re.compile(r'^content-language', re.I)
CONTENT_MD5_REGEX = re.compile(r'^content-md5', re.I)
CONTENT_TYPE_REGEX = re.compile(r'^content-type', re.I)
GOOG_API_VERSION_REGEX = re.compile(r'^x-goog-api-version', re.I)
GOOG_GENERATION_MATCH_REGEX = re.compile(r'^x-goog-if-generation-match', re.I)
GOOG_METAGENERATION_MATCH_REGEX = re.compile(r'^x-goog-if-metageneration-match',
re.I)
CUSTOM_GOOG_METADATA_REGEX = re.compile(r'^x-goog-meta-(?P<header_key>.*)',
re.I)
CUSTOM_AMZ_METADATA_REGEX = re.compile(r'^x-amz-meta-(?P<header_key>.*)', re.I)
CUSTOM_AMZ_HEADER_REGEX = re.compile(r'^x-amz-(?P<header_key>.*)', re.I)
# This distinguishes S3 custom headers from S3 metadata on objects.
S3_HEADER_PREFIX = 'custom-amz-header'
DEFAULT_CONTENT_TYPE = 'application/octet-stream'
# Because CORS is just a list in apitools, we need special handling or blank
# CORS lists will get sent with other configuration commands such as lifecycle,
# which would cause CORS configuration to be unintentionally removed.
# Protorpc defaults list values to an empty list, and won't allow us to set the
# value to None like other configuration fields, so there is no way to
# distinguish the default value from when we actually want to remove the CORS
# configuration. To work around this, we create a dummy CORS entry that
# signifies that we should nullify the CORS configuration.
# A value of [] means don't modify the CORS configuration.
# A value of REMOVE_CORS_CONFIG means remove the CORS configuration.
REMOVE_CORS_CONFIG = [
apitools_messages.Bucket.CorsValueListEntry(maxAgeSeconds=-1,
method=['REMOVE_CORS_CONFIG'])
]
# Similar to CORS above, we need a sentinel value allowing us to specify
# when a default object ACL should be private (containing no entries).
# A defaultObjectAcl value of [] means don't modify the default object ACL.
# A value of [PRIVATE_DEFAULT_OBJ_ACL] means create an empty/private default
# object ACL.
PRIVATE_DEFAULT_OBJ_ACL = apitools_messages.ObjectAccessControl(
id='PRIVATE_DEFAULT_OBJ_ACL')
def ObjectMetadataFromHeaders(headers):
"""Creates object metadata according to the provided headers.
  gsutil -h allows specifying various headers (originally intended
to be passed to boto in gsutil v3). For the JSON API to be compatible with
this option, we need to parse these headers into gsutil_api Object fields.
Args:
headers: Dict of headers passed via gsutil -h
Raises:
ArgumentException if an invalid header is encountered.
Returns:
apitools Object with relevant fields populated from headers.
"""
obj_metadata = apitools_messages.Object()
for header, value in headers.items():
if CACHE_CONTROL_REGEX.match(header):
obj_metadata.cacheControl = value.strip()
elif CONTENT_DISPOSITION_REGEX.match(header):
obj_metadata.contentDisposition = value.strip()
elif CONTENT_ENCODING_REGEX.match(header):
obj_metadata.contentEncoding = value.strip()
elif CONTENT_MD5_REGEX.match(header):
obj_metadata.md5Hash = value.strip()
elif CONTENT_LANGUAGE_REGEX.match(header):
obj_metadata.contentLanguage = value.strip()
elif CONTENT_TYPE_REGEX.match(header):
if not value:
obj_metadata.contentType = DEFAULT_CONTENT_TYPE
else:
obj_metadata.contentType = value.strip()
elif GOOG_API_VERSION_REGEX.match(header):
# API version is only relevant for XML, ignore and rely on the XML API
# to add the appropriate version.
continue
elif GOOG_GENERATION_MATCH_REGEX.match(header):
# Preconditions are handled elsewhere, but allow these headers through.
continue
elif GOOG_METAGENERATION_MATCH_REGEX.match(header):
# Preconditions are handled elsewhere, but allow these headers through.
continue
else:
custom_goog_metadata_match = CUSTOM_GOOG_METADATA_REGEX.match(header)
custom_amz_metadata_match = CUSTOM_AMZ_METADATA_REGEX.match(header)
custom_amz_header_match = CUSTOM_AMZ_HEADER_REGEX.match(header)
header_key = None
if custom_goog_metadata_match:
header_key = custom_goog_metadata_match.group('header_key')
elif custom_amz_metadata_match:
header_key = custom_amz_metadata_match.group('header_key')
elif custom_amz_header_match:
# If we got here we are guaranteed by the prior statement that this is
# not an x-amz-meta- header.
header_key = (S3_HEADER_PREFIX +
custom_amz_header_match.group('header_key'))
if header_key:
if header_key.lower() == 'x-goog-content-language':
# Work around content-language being inserted into custom metadata.
continue
if not obj_metadata.metadata:
obj_metadata.metadata = apitools_messages.Object.MetadataValue()
if not obj_metadata.metadata.additionalProperties:
obj_metadata.metadata.additionalProperties = []
obj_metadata.metadata.additionalProperties.append(
apitools_messages.Object.MetadataValue.AdditionalProperty(
key=header_key, value=value))
else:
raise ArgumentException('Invalid header specified: %s:%s' %
(header, value))
return obj_metadata
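# Hedged illustration of the mapping above (header names follow the regexes,
# values are hypothetical): a headers dict such as
#   {'Cache-Control': 'public, max-age=3600',
#    'Content-Type': 'text/plain',
#    'x-goog-meta-color': 'blue'}
# yields an apitools Object with cacheControl='public, max-age=3600',
# contentType='text/plain', and one custom metadata additionalProperty
# (key='color', value='blue').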
def HeadersFromObjectMetadata(dst_obj_metadata, provider):
"""Creates a header dictionary based on existing object metadata.
Args:
dst_obj_metadata: Object metadata to create the headers from.
provider: Provider string ('gs' or 's3').
Returns:
Headers dictionary.
"""
headers = {}
if not dst_obj_metadata:
return
# Metadata values of '' mean suppress/remove this header.
if dst_obj_metadata.cacheControl is not None:
if not dst_obj_metadata.cacheControl:
headers['cache-control'] = None
else:
headers['cache-control'] = dst_obj_metadata.cacheControl.strip()
if dst_obj_metadata.contentDisposition:
if not dst_obj_metadata.contentDisposition:
headers['content-disposition'] = None
else:
headers['content-disposition'] = (
dst_obj_metadata.contentDisposition.strip())
if dst_obj_metadata.contentEncoding:
if not dst_obj_metadata.contentEncoding:
headers['content-encoding'] = None
else:
headers['content-encoding'] = dst_obj_metadata.contentEncoding.strip()
if dst_obj_metadata.contentLanguage:
if not dst_obj_metadata.contentLanguage:
headers['content-language'] = None
else:
headers['content-language'] = dst_obj_metadata.contentLanguage.strip()
if dst_obj_metadata.md5Hash:
if not dst_obj_metadata.md5Hash:
headers['Content-MD5'] = None
else:
headers['Content-MD5'] = dst_obj_metadata.md5Hash.strip()
if dst_obj_metadata.contentType is not None:
if not dst_obj_metadata.contentType:
headers['content-type'] = None
else:
headers['content-type'] = dst_obj_metadata.contentType.strip()
if dst_obj_metadata.storageClass:
header_name = 'storage-class'
if provider == 'gs':
header_name = 'x-goog-' + header_name
elif provider == 's3':
header_name = 'x-amz-' + header_name
else:
raise ArgumentException('Invalid provider specified: %s' % provider)
headers[header_name] = dst_obj_metadata.storageClass.strip()
if (dst_obj_metadata.metadata and
dst_obj_metadata.metadata.additionalProperties):
for additional_property in dst_obj_metadata.metadata.additionalProperties:
# Work around content-language being inserted into custom metadata by
# the XML API.
if additional_property.key == 'content-language':
continue
# Don't translate special metadata markers.
if additional_property.key in S3_MARKER_GUIDS:
continue
if provider == 'gs':
header_name = 'x-goog-meta-' + additional_property.key
elif provider == 's3':
if additional_property.key.startswith(S3_HEADER_PREFIX):
header_name = ('x-amz-' +
additional_property.key[len(S3_HEADER_PREFIX):])
else:
header_name = 'x-amz-meta-' + additional_property.key
else:
raise ArgumentException('Invalid provider specified: %s' % provider)
if (additional_property.value is not None and
not additional_property.value):
headers[header_name] = None
else:
headers[header_name] = additional_property.value
return headers
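# Hedged illustration of the inverse mapping (values hypothetical): an Object
# with cacheControl='no-cache' and a custom metadata entry key='color',
# value='blue' yields {'cache-control': 'no-cache', 'x-goog-meta-color': 'blue'}
# for provider 'gs'; for provider 's3' the custom key becomes 'x-amz-meta-color'.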
def CopyObjectMetadata(src_obj_metadata, dst_obj_metadata, override=False):
"""Copies metadata from src_obj_metadata to dst_obj_metadata.
Args:
src_obj_metadata: Metadata from source object.
dst_obj_metadata: Initialized metadata for destination object.
override: If true, will overwrite metadata in destination object.
If false, only writes metadata for values that don't already
exist.
"""
if override or not dst_obj_metadata.cacheControl:
dst_obj_metadata.cacheControl = src_obj_metadata.cacheControl
if override or not dst_obj_metadata.contentDisposition:
dst_obj_metadata.contentDisposition = src_obj_metadata.contentDisposition
if override or not dst_obj_metadata.contentEncoding:
dst_obj_metadata.contentEncoding = src_obj_metadata.contentEncoding
if override or not dst_obj_metadata.contentLanguage:
dst_obj_metadata.contentLanguage = src_obj_metadata.contentLanguage
if override or not dst_obj_metadata.contentType:
dst_obj_metadata.contentType = src_obj_metadata.contentType
if override or not dst_obj_metadata.md5Hash:
dst_obj_metadata.md5Hash = src_obj_metadata.md5Hash
CopyCustomMetadata(src_obj_metadata, dst_obj_metadata, override=override)
def CopyCustomMetadata(src_obj_metadata, dst_obj_metadata, override=False):
"""Copies custom metadata from src_obj_metadata to dst_obj_metadata.
Args:
src_obj_metadata: Metadata from source object.
dst_obj_metadata: Initialized metadata for destination object.
override: If true, will overwrite metadata in destination object.
If false, only writes metadata for values that don't already
exist.
"""
# TODO: Apitools should ideally treat metadata like a real dictionary instead
# of a list of key/value pairs (with an O(N^2) lookup). In practice the
# number of values is typically small enough not to matter.
# Work around this by creating our own dictionary.
if (src_obj_metadata.metadata and
src_obj_metadata.metadata.additionalProperties):
if not dst_obj_metadata.metadata:
dst_obj_metadata.metadata = apitools_messages.Object.MetadataValue()
if not dst_obj_metadata.metadata.additionalProperties:
dst_obj_metadata.metadata.additionalProperties = []
dst_metadata_dict = {}
for dst_prop in dst_obj_metadata.metadata.additionalProperties:
dst_metadata_dict[dst_prop.key] = dst_prop.value
for src_prop in src_obj_metadata.metadata.additionalProperties:
if src_prop.key in dst_metadata_dict:
if override:
# Metadata values of '' mean suppress/remove this header.
if src_prop.value is not None and not src_prop.value:
dst_metadata_dict[src_prop.key] = None
else:
dst_metadata_dict[src_prop.key] = src_prop.value
elif src_prop.value != '': # pylint: disable=g-explicit-bool-comparison
# Don't propagate '' value since that means to remove the header.
dst_metadata_dict[src_prop.key] = src_prop.value
# Rewrite the list with our updated dict.
dst_obj_metadata.metadata.additionalProperties = []
for k, v in six.iteritems(dst_metadata_dict):
dst_obj_metadata.metadata.additionalProperties.append(
apitools_messages.Object.MetadataValue.AdditionalProperty(key=k,
value=v))
def PreconditionsFromHeaders(headers):
"""Creates bucket or object preconditions acccording to the provided headers.
Args:
headers: Dict of headers passed via gsutil -h
Returns:
    A gsutil Cloud API Preconditions object with any precondition fields
    populated from headers; fields stay None if no matching headers are present.
"""
return_preconditions = Preconditions()
try:
for header, value in headers.items():
if GOOG_GENERATION_MATCH_REGEX.match(header):
return_preconditions.gen_match = long(value)
if GOOG_METAGENERATION_MATCH_REGEX.match(header):
return_preconditions.meta_gen_match = long(value)
except ValueError as _:
raise ArgumentException('Invalid precondition header specified. '
'x-goog-if-generation-match and '
'x-goog-if-metageneration match must be specified '
'with a positive integer value.')
return return_preconditions
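# Hedged example of the precondition headers handled above (values are
# hypothetical): {'x-goog-if-generation-match': '5',
# 'x-goog-if-metageneration-match': '2'} produces a Preconditions object with
# gen_match=5 and meta_gen_match=2; non-integer values raise ArgumentException.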
def CreateNotFoundExceptionForObjectWrite(dst_provider,
dst_bucket_name,
src_provider=None,
src_bucket_name=None,
src_object_name=None,
src_generation=None):
"""Creates a NotFoundException for an object upload or copy.
This is necessary because 404s don't necessarily specify which resource
does not exist.
Args:
dst_provider: String abbreviation of destination provider, e.g., 'gs'.
dst_bucket_name: Destination bucket name for the write operation.
src_provider: String abbreviation of source provider, i.e. 'gs', if any.
src_bucket_name: Source bucket name, if any (for the copy case).
src_object_name: Source object name, if any (for the copy case).
src_generation: Source object generation, if any (for the copy case).
Returns:
NotFoundException with appropriate message.
"""
dst_url_string = '%s://%s' % (dst_provider, dst_bucket_name)
if src_bucket_name and src_object_name:
src_url_string = '%s://%s/%s' % (src_provider, src_bucket_name,
src_object_name)
if src_generation:
src_url_string += '#%s' % str(src_generation)
return NotFoundException(
'The source object %s or the destination bucket %s does not exist.' %
(src_url_string, dst_url_string))
return NotFoundException(
'The destination bucket %s does not exist or the write to the '
'destination must be restarted' % dst_url_string)
def CreateBucketNotFoundException(code, provider, bucket_name):
return BucketNotFoundException('%s://%s bucket does not exist.' %
(provider, bucket_name),
bucket_name,
status=code)
def CreateObjectNotFoundException(code,
provider,
bucket_name,
object_name,
generation=None):
uri_string = '%s://%s/%s' % (provider, bucket_name, object_name)
if generation:
uri_string += '#%s' % str(generation)
return NotFoundException('%s does not exist.' % uri_string, status=code)
def CheckForXmlConfigurationAndRaise(config_type_string, json_txt):
"""Checks a JSON parse exception for provided XML configuration."""
try:
xml.etree.ElementTree.fromstring(str(json_txt))
raise ArgumentException('\n'.join(
textwrap.wrap(
'XML {0} data provided; Google Cloud Storage {0} configuration '
'now uses JSON format. To convert your {0}, set the desired XML '
'ACL using \'gsutil {1} set ...\' with gsutil version 3.x. Then '
'use \'gsutil {1} get ...\' with gsutil version 4 or greater to '
'get the corresponding JSON {0}.'.format(
config_type_string, config_type_string.lower()))))
except XmlParseError:
pass
raise ArgumentException('JSON %s data could not be loaded '
'from: %s' % (config_type_string, json_txt))
class LifecycleTranslation(object):
"""Functions for converting between various lifecycle formats.
  This class handles conversion to and from boto lifecycle objects, JSON text,
and apitools Message objects.
"""
@classmethod
def BotoLifecycleFromMessage(cls, lifecycle_message):
"""Translates an apitools message to a boto lifecycle object."""
boto_lifecycle = boto.gs.lifecycle.LifecycleConfig()
if lifecycle_message:
for rule_message in lifecycle_message.rule:
boto_rule = boto.gs.lifecycle.Rule()
if rule_message.action and rule_message.action.type:
if rule_message.action.type.lower() == 'delete':
boto_rule.action = boto.gs.lifecycle.DELETE
elif rule_message.action.type.lower() == 'setstorageclass':
boto_rule.action = boto.gs.lifecycle.SET_STORAGE_CLASS
boto_rule.action_text = rule_message.action.storageClass
if rule_message.condition:
if rule_message.condition.age is not None:
boto_rule.conditions[boto.gs.lifecycle.AGE] = (str(
rule_message.condition.age))
if rule_message.condition.createdBefore:
boto_rule.conditions[boto.gs.lifecycle.CREATED_BEFORE] = (str(
rule_message.condition.createdBefore))
if rule_message.condition.isLive is not None:
boto_rule.conditions[boto.gs.lifecycle.IS_LIVE] = (
# Note that the GCS XML API only accepts "false" or "true"
# in all lower case.
str(rule_message.condition.isLive).lower())
if rule_message.condition.matchesStorageClass:
boto_rule.conditions[boto.gs.lifecycle.MATCHES_STORAGE_CLASS] = [
str(sc) for sc in rule_message.condition.matchesStorageClass
]
if rule_message.condition.numNewerVersions is not None:
boto_rule.conditions[boto.gs.lifecycle.NUM_NEWER_VERSIONS] = (str(
rule_message.condition.numNewerVersions))
boto_lifecycle.append(boto_rule)
return boto_lifecycle
@classmethod
def BotoLifecycleToMessage(cls, boto_lifecycle):
"""Translates a boto lifecycle object to an apitools message."""
lifecycle_message = None
if boto_lifecycle:
lifecycle_message = apitools_messages.Bucket.LifecycleValue()
for boto_rule in boto_lifecycle:
lifecycle_rule = (
apitools_messages.Bucket.LifecycleValue.RuleValueListEntry())
lifecycle_rule.condition = (apitools_messages.Bucket.LifecycleValue.
RuleValueListEntry.ConditionValue())
if boto_rule.action:
if boto_rule.action == boto.gs.lifecycle.DELETE:
lifecycle_rule.action = (apitools_messages.Bucket.LifecycleValue.
RuleValueListEntry.ActionValue(
type='Delete'))
elif boto_rule.action == boto.gs.lifecycle.SET_STORAGE_CLASS:
lifecycle_rule.action = (apitools_messages.Bucket.LifecycleValue.
RuleValueListEntry.ActionValue(
type='SetStorageClass',
storageClass=boto_rule.action_text))
if boto.gs.lifecycle.AGE in boto_rule.conditions:
lifecycle_rule.condition.age = int(
boto_rule.conditions[boto.gs.lifecycle.AGE])
if boto.gs.lifecycle.CREATED_BEFORE in boto_rule.conditions:
lifecycle_rule.condition.createdBefore = (
LifecycleTranslation.TranslateBotoLifecycleTimestamp(
boto_rule.conditions[boto.gs.lifecycle.CREATED_BEFORE]))
if boto.gs.lifecycle.IS_LIVE in boto_rule.conditions:
boto_is_live_str = (
boto_rule.conditions[boto.gs.lifecycle.IS_LIVE].lower())
if boto_is_live_str == 'true':
lifecycle_rule.condition.isLive = True
elif boto_is_live_str == 'false':
lifecycle_rule.condition.isLive = False
else:
raise CommandException(
'Got an invalid Boto value for IsLive condition ("%s"), '
'expected "true" or "false".' %
boto_rule.conditions[boto.gs.lifecycle.IS_LIVE])
if boto.gs.lifecycle.MATCHES_STORAGE_CLASS in boto_rule.conditions:
for storage_class in (
boto_rule.conditions[boto.gs.lifecycle.MATCHES_STORAGE_CLASS]):
lifecycle_rule.condition.matchesStorageClass.append(storage_class)
if boto.gs.lifecycle.NUM_NEWER_VERSIONS in boto_rule.conditions:
lifecycle_rule.condition.numNewerVersions = int(
boto_rule.conditions[boto.gs.lifecycle.NUM_NEWER_VERSIONS])
lifecycle_message.rule.append(lifecycle_rule)
return lifecycle_message
@classmethod
def JsonLifecycleFromMessage(cls, lifecycle_message):
"""Translates an apitools message to lifecycle JSON."""
return str(encoding.MessageToJson(lifecycle_message)) + '\n'
@classmethod
def JsonLifecycleToMessage(cls, json_txt):
"""Translates lifecycle JSON to an apitools message."""
try:
deserialized_lifecycle = json.loads(json_txt)
# If lifecycle JSON is the in the following format
# {'lifecycle': {'rule': ... then strip out the 'lifecycle' key
# and reduce it to the following format
# {'rule': ...
if 'lifecycle' in deserialized_lifecycle:
deserialized_lifecycle = deserialized_lifecycle['lifecycle']
lifecycle = encoding.DictToMessage(
deserialized_lifecycle or {}, apitools_messages.Bucket.LifecycleValue)
return lifecycle
except ValueError:
CheckForXmlConfigurationAndRaise('lifecycle', json_txt)
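  # Hedged example of lifecycle JSON accepted above (rule contents are
  # hypothetical); both forms parse to the same LifecycleValue message:
  #   {"rule": [{"action": {"type": "Delete"}, "condition": {"age": 30}}]}
  #   {"lifecycle": {"rule": [{"action": {"type": "Delete"},
  #                            "condition": {"age": 30}}]}}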
@classmethod
def TranslateBotoLifecycleTimestamp(cls, lifecycle_datetime):
"""Parses the timestamp from the boto lifecycle into a datetime object."""
return datetime.datetime.strptime(lifecycle_datetime, '%Y-%m-%d').date()
class CorsTranslation(object):
"""Functions for converting between various CORS formats.
  This class handles conversion to and from Boto Cors objects, JSON text,
and apitools Message objects.
"""
@classmethod
def BotoCorsFromMessage(cls, cors_message):
"""Translates an apitools message to a boto Cors object."""
cors = boto.gs.cors.Cors()
cors.cors = []
for collection_message in cors_message:
collection_elements = []
if collection_message.maxAgeSeconds:
collection_elements.append(
(boto.gs.cors.MAXAGESEC, str(collection_message.maxAgeSeconds)))
if collection_message.method:
method_elements = []
for method in collection_message.method:
method_elements.append((boto.gs.cors.METHOD, method))
collection_elements.append((boto.gs.cors.METHODS, method_elements))
if collection_message.origin:
origin_elements = []
for origin in collection_message.origin:
origin_elements.append((boto.gs.cors.ORIGIN, origin))
collection_elements.append((boto.gs.cors.ORIGINS, origin_elements))
if collection_message.responseHeader:
header_elements = []
for header in collection_message.responseHeader:
header_elements.append((boto.gs.cors.HEADER, header))
collection_elements.append((boto.gs.cors.HEADERS, header_elements))
cors.cors.append(collection_elements)
return cors
@classmethod
def BotoCorsToMessage(cls, boto_cors):
"""Translates a boto Cors object to an apitools message."""
message_cors = []
if boto_cors.cors:
for cors_collection in boto_cors.cors:
if cors_collection:
collection_message = apitools_messages.Bucket.CorsValueListEntry()
for element_tuple in cors_collection:
if element_tuple[0] == boto.gs.cors.MAXAGESEC:
collection_message.maxAgeSeconds = int(element_tuple[1])
if element_tuple[0] == boto.gs.cors.METHODS:
for method_tuple in element_tuple[1]:
collection_message.method.append(method_tuple[1])
if element_tuple[0] == boto.gs.cors.ORIGINS:
for origin_tuple in element_tuple[1]:
collection_message.origin.append(origin_tuple[1])
if element_tuple[0] == boto.gs.cors.HEADERS:
for header_tuple in element_tuple[1]:
collection_message.responseHeader.append(header_tuple[1])
message_cors.append(collection_message)
return message_cors
@classmethod
def JsonCorsToMessageEntries(cls, json_cors):
"""Translates CORS JSON to an apitools message.
Args:
json_cors: JSON string representing CORS configuration.
Raises:
ArgumentException on invalid CORS JSON data.
Returns:
List of apitools Bucket.CorsValueListEntry. An empty list represents
no CORS configuration.
"""
deserialized_cors = None
try:
deserialized_cors = json.loads(json_cors)
except ValueError:
CheckForXmlConfigurationAndRaise('CORS', json_cors)
if not isinstance(deserialized_cors, list):
raise ArgumentException(
'CORS JSON should be formatted as a list containing one or more JSON '
'objects.\nSee "gsutil help cors".')
cors = []
for cors_entry in deserialized_cors:
cors.append(
encoding.DictToMessage(cors_entry,
apitools_messages.Bucket.CorsValueListEntry))
return cors
@classmethod
def MessageEntriesToJson(cls, cors_message):
"""Translates an apitools message to CORS JSON."""
json_text = ''
# Because CORS is a MessageField, serialize/deserialize as JSON list.
json_text += '['
printed_one = False
for cors_entry in cors_message:
if printed_one:
json_text += ','
else:
printed_one = True
json_text += encoding.MessageToJson(cors_entry)
json_text += ']\n'
return json_text
def S3MarkerAclFromObjectMetadata(object_metadata):
"""Retrieves GUID-marked S3 ACL from object metadata, if present.
Args:
object_metadata: Object metadata to check.
Returns:
S3 ACL text, if present, None otherwise.
"""
if (object_metadata and object_metadata.metadata and
object_metadata.metadata.additionalProperties):
for prop in object_metadata.metadata.additionalProperties:
if prop.key == S3_ACL_MARKER_GUID:
return prop.value
def AddS3MarkerAclToObjectMetadata(object_metadata, acl_text):
"""Adds a GUID-marked S3 ACL to the object metadata.
Args:
object_metadata: Object metadata to add the acl to.
acl_text: S3 ACL text to add.
"""
if not object_metadata.metadata:
object_metadata.metadata = apitools_messages.Object.MetadataValue()
if not object_metadata.metadata.additionalProperties:
object_metadata.metadata.additionalProperties = []
object_metadata.metadata.additionalProperties.append(
apitools_messages.Object.MetadataValue.AdditionalProperty(
key=S3_ACL_MARKER_GUID, value=acl_text))
def UnaryDictToXml(message):
"""Generates XML representation of a nested dict.
This dict contains exactly one top-level entry and an arbitrary number of
2nd-level entries, e.g. capturing a WebsiteConfiguration message.
Args:
message: The dict encoding the message.
Returns:
XML string representation of the input dict.
Raises:
Exception: if dict contains more than one top-level entry.
"""
if len(message) != 1:
raise Exception('Expected dict of size 1, got size %d' % len(message))
name, content = message.items()[0]
element_type = xml.etree.ElementTree.Element(name)
for element_property, value in sorted(content.items()):
node = xml.etree.ElementTree.SubElement(element_type, element_property)
node.text = value
return xml.etree.ElementTree.tostring(element_type)
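# Illustrative example for UnaryDictToXml (hypothetical values): a dict such as
# {'WebsiteConfiguration': {'MainPageSuffix': 'index.html', 'NotFoundPage': '404.html'}}
# would be rendered as
# <WebsiteConfiguration><MainPageSuffix>index.html</MainPageSuffix><NotFoundPage>404.html</NotFoundPage></WebsiteConfiguration>
# (2nd-level entries are emitted in sorted key order).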
class LabelTranslation(object):
"""Functions for converting between various Label(JSON)/Tags(XML) formats.
This class handles conversion to and from Boto Tags objects, JSON text, and
apitools LabelsValue message objects.
"""
@classmethod
def BotoTagsToMessage(cls, tags):
label_dict = {}
for tag_set in tags:
label_dict.update(dict((i.key, i.value) for i in tag_set))
return cls.DictToMessage(label_dict) if label_dict else None
@classmethod
def BotoTagsFromMessage(cls, message):
label_dict = json.loads(cls.JsonFromMessage(message))
tag_set = TagSet()
for key, value in six.iteritems(label_dict):
if value: # Skip values which may be set to None.
tag_set.add_tag(key, value)
tags = Tags()
tags.add_tag_set(tag_set)
return tags
@classmethod
def JsonFromMessage(cls, message, pretty_print=False):
json_str = encoding.MessageToJson(message)
if pretty_print:
return json.dumps(json.loads(json_str),
sort_keys=True,
indent=2,
separators=(',', ': '))
return json_str
@classmethod
def DictToMessage(cls, label_dict):
return encoding.DictToMessage(label_dict,
apitools_messages.Bucket.LabelsValue)
class AclTranslation(object):
"""Functions for converting between various ACL formats.
This class handles conversion to and from Boto ACL objects, JSON text,
and apitools Message objects.
"""
JSON_TO_XML_ROLES = {
'READER': 'READ',
'WRITER': 'WRITE',
'OWNER': 'FULL_CONTROL',
}
XML_TO_JSON_ROLES = {
'READ': 'READER',
'WRITE': 'WRITER',
'FULL_CONTROL': 'OWNER',
}
@classmethod
def BotoAclFromJson(cls, acl_json):
acl = ACL()
acl.parent = None
acl.entries = cls.BotoEntriesFromJson(acl_json, acl)
return acl
@classmethod
# acl_message is a list of messages, either object or bucketaccesscontrol
def BotoAclFromMessage(cls, acl_message):
acl_dicts = []
for message in acl_message:
if message == PRIVATE_DEFAULT_OBJ_ACL:
# Sentinel value indicating acl_dicts should be an empty list to create
# a private (no entries) default object ACL.
break
acl_dicts.append(encoding.MessageToDict(message))
return cls.BotoAclFromJson(acl_dicts)
@classmethod
def BotoAclToJson(cls, acl):
if hasattr(acl, 'entries'):
return cls.BotoEntriesToJson(acl.entries)
return []
@classmethod
def BotoObjectAclToMessage(cls, acl):
for entry in cls.BotoAclToJson(acl):
message = encoding.DictToMessage(entry,
apitools_messages.ObjectAccessControl)
message.kind = 'storage#objectAccessControl'
yield message
@classmethod
def BotoBucketAclToMessage(cls, acl):
for entry in cls.BotoAclToJson(acl):
message = encoding.DictToMessage(entry,
apitools_messages.BucketAccessControl)
message.kind = 'storage#bucketAccessControl'
yield message
@classmethod
def BotoEntriesFromJson(cls, acl_json, parent):
entries = Entries(parent)
entries.parent = parent
entries.entry_list = [
cls.BotoEntryFromJson(entry_json) for entry_json in acl_json
]
return entries
@classmethod
def BotoEntriesToJson(cls, entries):
return [cls.BotoEntryToJson(entry) for entry in entries.entry_list]
@classmethod
def BotoEntryFromJson(cls, entry_json):
"""Converts a JSON entry into a Boto ACL entry."""
entity = entry_json['entity']
permission = cls.JSON_TO_XML_ROLES[entry_json['role']]
if entity.lower() == ALL_USERS.lower():
return Entry(type=ALL_USERS, permission=permission)
elif entity.lower() == ALL_AUTHENTICATED_USERS.lower():
return Entry(type=ALL_AUTHENTICATED_USERS, permission=permission)
elif entity.startswith('project'):
raise CommandException('XML API does not support project scopes, '
'cannot translate ACL.')
elif 'email' in entry_json:
if entity.startswith('user'):
scope_type = USER_BY_EMAIL
elif entity.startswith('group'):
scope_type = GROUP_BY_EMAIL
return Entry(type=scope_type,
email_address=entry_json['email'],
permission=permission)
elif 'entityId' in entry_json:
if entity.startswith('user'):
scope_type = USER_BY_ID
elif entity.startswith('group'):
scope_type = GROUP_BY_ID
return Entry(type=scope_type,
id=entry_json['entityId'],
permission=permission)
elif 'domain' in entry_json:
if entity.startswith('domain'):
scope_type = GROUP_BY_DOMAIN
return Entry(type=scope_type,
domain=entry_json['domain'],
permission=permission)
raise CommandException('Failed to translate JSON ACL to XML.')
@classmethod
def BotoEntryToJson(cls, entry):
"""Converts a Boto ACL entry to a valid JSON dictionary."""
acl_entry_json = {}
# JSON API documentation uses camel case.
scope_type_lower = entry.scope.type.lower()
if scope_type_lower == ALL_USERS.lower():
acl_entry_json['entity'] = 'allUsers'
elif scope_type_lower == ALL_AUTHENTICATED_USERS.lower():
acl_entry_json['entity'] = 'allAuthenticatedUsers'
elif scope_type_lower == USER_BY_EMAIL.lower():
acl_entry_json['entity'] = 'user-%s' % entry.scope.email_address
acl_entry_json['email'] = entry.scope.email_address
elif scope_type_lower == USER_BY_ID.lower():
acl_entry_json['entity'] = 'user-%s' % entry.scope.id
acl_entry_json['entityId'] = entry.scope.id
elif scope_type_lower == GROUP_BY_EMAIL.lower():
acl_entry_json['entity'] = 'group-%s' % entry.scope.email_address
acl_entry_json['email'] = entry.scope.email_address
elif scope_type_lower == GROUP_BY_ID.lower():
acl_entry_json['entity'] = 'group-%s' % entry.scope.id
acl_entry_json['entityId'] = entry.scope.id
elif scope_type_lower == GROUP_BY_DOMAIN.lower():
acl_entry_json['entity'] = 'domain-%s' % entry.scope.domain
acl_entry_json['domain'] = entry.scope.domain
else:
raise ArgumentException('ACL contains invalid scope type: %s' %
scope_type_lower)
acl_entry_json['role'] = cls.XML_TO_JSON_ROLES[entry.permission]
return acl_entry_json
@classmethod
def JsonToMessage(cls, json_data, message_type):
"""Converts the input JSON data into list of Object/BucketAccessControls.
Args:
json_data: String of JSON to convert.
message_type: Which type of access control entries to return,
either ObjectAccessControl or BucketAccessControl.
Raises:
ArgumentException on invalid JSON data.
Returns:
List of ObjectAccessControl or BucketAccessControl elements.
"""
try:
deserialized_acl = json.loads(json_data)
acl = []
for acl_entry in deserialized_acl:
acl.append(encoding.DictToMessage(acl_entry, message_type))
return acl
except ValueError:
CheckForXmlConfigurationAndRaise('ACL', json_data)
@classmethod
def JsonFromMessage(cls, acl):
"""Strips unnecessary fields from an ACL message and returns valid JSON.
Args:
acl: iterable ObjectAccessControl or BucketAccessControl
Returns:
ACL JSON string.
"""
serializable_acl = []
if acl is not None:
for acl_entry in acl:
if acl_entry.kind == 'storage#objectAccessControl':
acl_entry.object = None
acl_entry.generation = None
acl_entry.kind = None
acl_entry.bucket = None
acl_entry.id = None
acl_entry.selfLink = None
acl_entry.etag = None
serializable_acl.append(encoding.MessageToDict(acl_entry))
return json.dumps(serializable_acl,
sort_keys=True,
indent=2,
separators=(',', ': '))
| endlessm/chromium-browser | third_party/catapult/third_party/gsutil/gslib/utils/translation_helper.py | Python | bsd-3-clause | 38,303 |
# -*- coding: utf-8 -*-
'''
customized seq2seq (https://github.com/farizrahman4u/seq2seq)
'''
from __future__ import absolute_import
from seq2seq.layers.encoders import LSTMEncoder
from seq2seq.layers.decoders import LSTMDecoder, LSTMDecoder2, AttentionDecoder
from seq2seq.layers.bidirectional import Bidirectional
from keras.layers.recurrent import LSTM
from keras.layers.core import RepeatVector, Dense, TimeDistributedDense, Dropout, Activation
from keras.models import Sequential
import theano.tensor as T
'''
Papers:
[1] Sequence to Sequence Learning with Neural Networks (http://arxiv.org/abs/1409.3215)
[2] Learning Phrase Representations using RNN Encoder-Decoder for Statistical Machine Translation (http://arxiv.org/abs/1406.1078)
[3] Neural Machine Translation by Jointly Learning to Align and Translate (http://arxiv.org/abs/1409.0473)
'''
class Seq2seqBase(Sequential):
'''
Abstract class for all Seq2seq models.
'''
wait_for_shape = False
def add(self, layer):
'''
For automatic shape inference in nested models.
'''
self.layers.append(layer)
n = len(self.layers)
if self.wait_for_shape or (n == 1 and not hasattr(layer, '_input_shape')):
self.wait_for_shape = True
elif n > 1:
layer.set_previous(self.layers[-2])
def set_previous(self, layer):
'''
For automatic shape inference in nested models.
'''
self.layers[0].set_previous(layer)
if self.wait_for_shape:
self.wait_for_shape = False
for i in range(1, len(self.layers)):
self.layers[i].set_previous(self.layers[i - 1])
def reset_states(self):
for l in self.layers:
if hasattr(l, 'stateful'):
if l.stateful:
l.reset_states()
class SimpleSeq2seq(Seq2seqBase):
'''
Simple model for sequence to sequence learning.
    The encoder encodes the input sequence into a vector (called the context vector).
    The decoder decodes the context vector into a sequence of vectors.
    There is no one-to-one relation between the input and output sequence elements.
The input sequence and output sequence may differ in length.
Arguments:
output_dim : Required output dimension.
hidden_dim : The dimension of the internal representations of the model.
output_length : Length of the required output sequence.
depth : Used to create a deep Seq2seq model. For example, if depth = 3,
            there will be 3 LSTMs on the encoding side and 3 LSTMs on the
decoding side. You can also specify depth as a tuple. For example,
if depth = (4, 5), 4 LSTMs will be added to the encoding side and
5 LSTMs will be added to the decoding side.
dropout : Dropout probability in between layers.
'''
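    # Layer stack assembled in __init__ below: optional extra encoder LSTMs ->
    # encoder LSTM -> RepeatVector(output_length) -> decoder LSTM -> optional
    # extra decoder LSTMs -> TimeDistributedDense with softmax, with Dropout
    # interleaved between the layers.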
def __init__(self, output_dim, hidden_dim, output_length, depth=1, dropout=0.25, **kwargs):
super(SimpleSeq2seq, self).__init__()
if type(depth) not in [list, tuple]:
depth = (depth, depth)
self.encoder = LSTM(hidden_dim, **kwargs)
self.decoder = LSTM(hidden_dim, return_sequences=True, **kwargs)
for i in range(1, depth[0]):
self.add(LSTM(hidden_dim, return_sequences=True, **kwargs))
self.add(Dropout(dropout))
self.add(self.encoder)
self.add(Dropout(dropout))
self.add(RepeatVector(output_length))
self.add(self.decoder)
for i in range(1, depth[1]):
self.add(LSTM(hidden_dim, return_sequences=True, **kwargs))
self.add(Dropout(dropout))
#if depth[1] > 1:
self.add(TimeDistributedDense(output_dim, activation='softmax')) | masterkeywikz/seq2graph | amr2seq/seq2seq_util/seq2seq_models.py | Python | mit | 3,705 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('mig_main', '0005_remove_memberprofile_edu_bckgrd_form'),
]
operations = [
migrations.AddField(
model_name='userprofile',
name='maiden_name',
field=models.CharField(max_length=40, null=True, blank=True),
preserve_default=True,
),
]
| tbpmig/mig-website | mig_main/migrations/0006_userprofile_maiden_name.py | Python | apache-2.0 | 487 |
'''
Created on 10 Aug 2016
@author: bcraenen
'''
import sys
from sklearn.tree import _tree
from root.main.ArffHandler import Arffhandler
from sklearn.ensemble.forest import RandomForestClassifier
from root.other.Fragment import KnowledgeFragment
class ForestHandler(object):
def _get_tree_paths(self, tree, node_id, depth=0):
"""
Returns all paths through the tree as list of node_ids
"""
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
if left_child != _tree.TREE_LEAF:
left_paths = self._get_tree_paths(tree, left_child, depth=depth + 1)
right_paths = self._get_tree_paths(tree, right_child, depth=depth + 1)
for path in left_paths:
path.append(node_id)
for path in right_paths:
path.append(node_id)
paths = left_paths + right_paths
else:
paths = [[node_id]]
return paths
def print_tree(self, tree, root=0, depth=1):
if depth == 1:
print "def predict(X_i):"
indent = " "*depth
print indent + "# node %s: impurity = %.2f" % (str(root), tree.impurity[root])
print indent + "# node %s: number of samples = %s: " % (str(root), str(tree.n_node_samples[root]))
print indent + "# node %s: weighted number of samples = %s: " % (str(root), str(tree.weighted_n_node_samples[root]))
left_child = tree.children_left[root]
right_child = tree.children_right[root]
if left_child == _tree.TREE_LEAF:
print indent + "return %s # (node %d)" % (str(tree.value[root]), root)
else:
print indent + "if X_i[%d] < %.2f: # (node %d)" % (tree.feature[root], tree.threshold[root], root)
self.print_tree(tree, root=left_child, depth=depth + 1)
print indent + "else:"
self.print_tree(tree, root=right_child, depth=depth + 1)
def retrieve_tree_table(self, tree, root=0, depth=1):
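        # Flatten the tree into the parallel per-node lists kept on this object
        # (roots, features, thresholds, ...) using a preorder walk: current node
        # first, then the left subtree, then the right subtree.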
if depth == 1:
# do something for root?
pass
self.roots.append(root)
self.features.append(tree.feature[root])
self.thresholds.append(tree.threshold[root])
self.impurities.append(tree.impurity[root])
self.n_node_samples.append(tree.n_node_samples[root])
self.weighted_n_node_samples.append(tree.weighted_n_node_samples[root])
self.values.append(tree.value[root])
left_child = tree.children_left[root]
right_child = tree.children_right[root]
if left_child == _tree.TREE_LEAF:
# do something for leaf?
pass
else:
self.retrieve_tree_table(tree, root=left_child, depth=depth + 1)
self.retrieve_tree_table(tree, root=right_child, depth=depth + 1)
def print_paths(self, forest):
forest_paths = []
for tree in forest.estimators_:
forest_paths.append(self._get_tree_paths(tree.tree_, 0))
print forest_paths
def print_forest(self, forest):
for tree in forest.estimators_:
self.print_tree(tree.tree_)
def retrieve_forest_table(self, forest):
for tree in forest.estimators_:
self.retrieve_tree_table(tree.tree_)
def extract_fragments_tree(self, tree, root=0, depth=1):
if depth == 1:
pass
if tree.feature[root] > -1:
self.test_fragments.append(KnowledgeFragment(feature=tree.feature[root], \
threshold=tree.threshold[root], \
impurity=tree.impurity[root]))
left_child = tree.children_left[root]
right_child = tree.children_right[root]
if left_child == _tree.TREE_LEAF:
pass
else:
self.extract_fragments_tree(tree, root=left_child, depth=depth + 1)
self.extract_fragments_tree(tree, root=right_child, depth=depth + 1)
def extract_fragments(self, forest):
for tree in forest.estimators_:
self.extract_fragments_tree(tree.tree_)
#self.test_fragments.sort(key=lambda x: x.feature, reverse=False)
def uniquify_fragments(self):
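        # Collapse fragments that share the same feature and threshold, keeping
        # the duplicate with the larger recorded impurity value.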
counter1 = 0
while counter1 < len(self.test_fragments):
counter2 = counter1 + 1
while counter2 < len(self.test_fragments):
if self.test_fragments[counter1].feature == self.test_fragments[counter2].feature:
if self.test_fragments[counter1].threshold == self.test_fragments[counter2].threshold:
if self.test_fragments[counter1].impurity < self.test_fragments[counter2].impurity:
self.test_fragments[counter1] = self.test_fragments[counter2]
del self.test_fragments[counter2]
counter2 = counter2-1
counter2 = counter2+1
counter1 = counter1+1
def __init__(self):
'''
Constructor
'''
self.roots = []
self.features = []
self.thresholds = []
self.impurities = []
self.n_node_samples = []
self.weighted_n_node_samples = []
self.values = []
self.test_fragments = []
if __name__ == "__main__":
print "Load Dataset"
Arffhandler = Arffhandler()
Arffhandler.Load(sys.argv[1])
Arffhandler.OneHotEncode()
print "Setup data"
inputs = Arffhandler.inputs
output = Arffhandler.output
print "Setup and fit RandomForest"
randomForest = RandomForestClassifier(n_estimators=30, \
criterion="gini", \
max_features="auto", \
max_depth=3, \
min_samples_split=2, \
min_samples_leaf=1, \
min_weight_fraction_leaf=0, \
max_leaf_nodes=None, \
bootstrap=True, \
oob_score=True, \
n_jobs=1, \
random_state=None, \
verbose=0, \
warm_start=False, \
class_weight=None)
randomForest.fit(inputs, output)
# print "estimators_: " + str(randomForest.estimators_)
# print "classes_: " + str(randomForest.classes_)
# print "n_classes_: " + str(randomForest.n_classes_)
# print "n_Features_: " + str(randomForest.n_features_)
# print "n_outputs_: " + str(randomForest.n_outputs_)
# print "feature_importances_: " + str(randomForest.feature_importances_)
# print "oob_score_: " + str(randomForest.oob_score_)
# print "oob_descision_function_: " + str(randomForest.oob_decision_function_)
interpreter = ForestHandler()
# interpreter.print_paths(randomForest)
# interpreter.print_forest(randomForest)
# interpreter.retrieve_forest_table(randomForest)
# print "roots: " + str(interpreter.roots)
# print "features: " + str(interpreter.features)
# print "thresholds: " + str(interpreter.thresholds)
# print "impurities: " + str(interpreter.impurities)
# print "n_node_samples: " + str(interpreter.n_node_samples)
# print "weighted_n_nodes_samples: " + str(interpreter.weighted_n_node_samples)
# print "values: " + str(interpreter.values)
interpreter.extract_fragments(randomForest)
interpreter.uniquify_fragments()
# print "test fragments: " + str(interpreter.test_fragments)
for e in interpreter.test_fragments:
e.calc_gini_impurity(inputs, output)
e.calc_entropy_impurity(inputs, output)
print e
| bcraenen/KFClassifier | other/handlers/ForestHandler.py | Python | gpl-3.0 | 7,770 |
# pylint: disable=W0611
'''
Kivy Base
=========
This module contains the Kivy core functionality and is not intended for end
users. Feel free to look through it, but bear in mind that calling any of
these methods directly may result in unpredictable behavior, as these calls
access the event loop of an application directly.
'''
__all__ = (
'EventLoop',
'EventLoopBase',
'ExceptionHandler',
'ExceptionManagerBase',
'ExceptionManager',
'runTouchApp',
'async_runTouchApp',
'stopTouchApp',
)
import sys
import os
from kivy.config import Config
from kivy.logger import Logger
from kivy.utils import platform
from kivy.clock import Clock
from kivy.event import EventDispatcher
from kivy.lang import Builder
from kivy.context import register_context
# private vars
EventLoop = None
class ExceptionHandler(object):
'''Base handler that catches exceptions in :func:`runTouchApp`.
You can subclass and extend it as follows::
class E(ExceptionHandler):
def handle_exception(self, inst):
                Logger.exception('Exception caught by ExceptionHandler')
return ExceptionManager.PASS
ExceptionManager.add_handler(E())
All exceptions will be set to PASS, and logged to the console!
'''
def __init__(self):
pass
def handle_exception(self, exception):
'''Handle one exception, defaults to returning
`ExceptionManager.RAISE`.
'''
return ExceptionManager.RAISE
class ExceptionManagerBase:
'''ExceptionManager manages exceptions handlers.'''
RAISE = 0
PASS = 1
def __init__(self):
self.handlers = []
self.policy = ExceptionManagerBase.RAISE
def add_handler(self, cls):
'''Add a new exception handler to the stack.'''
if cls not in self.handlers:
self.handlers.append(cls)
def remove_handler(self, cls):
        '''Remove an exception handler from the stack.'''
if cls in self.handlers:
self.handlers.remove(cls)
def handle_exception(self, inst):
        '''Called when an exception occurs in the :func:`runTouchApp`
main loop.'''
ret = self.policy
for handler in self.handlers:
r = handler.handle_exception(inst)
if r == ExceptionManagerBase.PASS:
ret = r
return ret
#: Instance of a :class:`ExceptionManagerBase` implementation.
ExceptionManager = register_context('ExceptionManager', ExceptionManagerBase)
class EventLoopBase(EventDispatcher):
'''Main event loop. This loop handles the updating of input and
dispatching events.
'''
__events__ = ('on_start', 'on_pause', 'on_stop')
def __init__(self):
super(EventLoopBase, self).__init__()
self.quit = False
self.input_events = []
self.postproc_modules = []
self.status = 'idle'
self.stopping = False
self.input_providers = []
self.input_providers_autoremove = []
self.event_listeners = []
self.window = None
self.me_list = []
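        # 'me' is short for MotionEvent; me_list tracks the touches currently in
        # the down or move state (exposed through the `touches` property).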
@property
def touches(self):
'''Return the list of all touches currently in down or move states.
'''
return self.me_list
def ensure_window(self):
'''Ensure that we have a window.
'''
import kivy.core.window # NOQA
if not self.window:
Logger.critical('App: Unable to get a Window, abort.')
sys.exit(1)
def set_window(self, window):
'''Set the window used for the event loop.
'''
self.window = window
def add_input_provider(self, provider, auto_remove=False):
'''Add a new input provider to listen for touch events.
'''
if provider not in self.input_providers:
self.input_providers.append(provider)
if auto_remove:
self.input_providers_autoremove.append(provider)
def remove_input_provider(self, provider):
'''Remove an input provider.
'''
if provider in self.input_providers:
self.input_providers.remove(provider)
def add_event_listener(self, listener):
'''Add a new event listener for getting touch events.
'''
if listener not in self.event_listeners:
self.event_listeners.append(listener)
def remove_event_listener(self, listener):
'''Remove an event listener from the list.
'''
if listener in self.event_listeners:
self.event_listeners.remove(listener)
def start(self):
'''Must be called only once before :meth:`EventLoopBase.run()`.
This starts all configured input providers.'''
self.status = 'started'
self.quit = False
for provider in self.input_providers:
provider.start()
self.dispatch('on_start')
def close(self):
'''Exit from the main loop and stop all configured
input providers.'''
self.quit = True
self.stop()
self.status = 'closed'
def stop(self):
'''Stop all input providers and call callbacks registered using
`EventLoop.add_stop_callback()`.'''
        # XXX stop in the reverse order that we started them!! (like push/
        # pop), very important because e.g. wm_touch and WM_PEN both store
        # the old window proc and then restore it; if the order is messed up,
        # a big problem happens, crashing badly without error
for provider in reversed(self.input_providers[:]):
provider.stop()
if provider in self.input_providers_autoremove:
self.input_providers_autoremove.remove(provider)
self.input_providers.remove(provider)
# ensure any restart will not break anything later.
self.input_events = []
self.stopping = False
self.status = 'stopped'
self.dispatch('on_stop')
def add_postproc_module(self, mod):
'''Add a postproc input module (DoubleTap, TripleTap, DeJitter
RetainTouch are defaults).'''
if mod not in self.postproc_modules:
self.postproc_modules.append(mod)
def remove_postproc_module(self, mod):
'''Remove a postproc module.'''
if mod in self.postproc_modules:
self.postproc_modules.remove(mod)
def remove_android_splash(self, *args):
'''Remove android presplash in SDL2 bootstrap.'''
try:
from android import remove_presplash
remove_presplash()
except ImportError:
Logger.warning(
'Base: Failed to import "android" module. '
'Could not remove android presplash.')
return
def post_dispatch_input(self, etype, me):
'''This function is called by :meth:`EventLoopBase.dispatch_input()`
when we want to dispatch an input event. The event is dispatched to
all listeners and if grabbed, it's dispatched to grabbed widgets.
'''
# update available list
if etype == 'begin':
self.me_list.append(me)
elif etype == 'end':
if me in self.me_list:
self.me_list.remove(me)
# dispatch to listeners
if not me.grab_exclusive_class:
for listener in self.event_listeners:
listener.dispatch('on_motion', etype, me)
# dispatch grabbed touch
me.grab_state = True
for _wid in me.grab_list[:]:
# it's a weakref, call it!
wid = _wid()
if wid is None:
# object is gone, stop.
me.grab_list.remove(_wid)
continue
root_window = wid.get_root_window()
if wid != root_window and root_window is not None:
me.push()
w, h = root_window.system_size
if platform == 'ios' or root_window._density != 1:
w, h = root_window.size
kheight = root_window.keyboard_height
smode = root_window.softinput_mode
me.scale_for_screen(w, h, rotation=root_window.rotation,
smode=smode, kheight=kheight)
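                # the touch was saved with me.push() above; its coordinates are
                # rescaled to the window here so they can then be transformed
                # into the grabbed widget's local space, and me.pop() below
                # restores the original values.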
parent = wid.parent
# and do to_local until the widget
try:
if parent:
me.apply_transform_2d(parent.to_widget)
else:
me.apply_transform_2d(wid.to_widget)
me.apply_transform_2d(wid.to_parent)
except AttributeError:
                # when using an inner window, an app may have grabbed the touch
                # but the app was removed. The touch can't access
                # one of its parents (i.e. self.parent will be None)
                # and BAM, the bug happens.
me.pop()
continue
me.grab_current = wid
wid._context.push()
if etype == 'begin':
                    # don't dispatch the touch again in on_touch_down;
                    # a down event is nearly unique here.
# wid.dispatch('on_touch_down', touch)
pass
elif etype == 'update':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_move', me)
else:
wid.dispatch('on_touch_move', me)
elif etype == 'end':
if wid._context.sandbox:
with wid._context.sandbox:
wid.dispatch('on_touch_up', me)
else:
wid.dispatch('on_touch_up', me)
wid._context.pop()
me.grab_current = None
if wid != root_window and root_window is not None:
me.pop()
me.grab_state = False
def _dispatch_input(self, *ev):
        # remove the saved event for the touch if it exists
if ev in self.input_events:
self.input_events.remove(ev)
self.input_events.append(ev)
def dispatch_input(self):
'''Called by :meth:`EventLoopBase.idle()` to read events from input
providers, pass events to postproc, and dispatch final events.
'''
# first, acquire input events
for provider in self.input_providers:
provider.update(dispatch_fn=self._dispatch_input)
# execute post-processing modules
for mod in self.postproc_modules:
self.input_events = mod.process(events=self.input_events)
# real dispatch input
input_events = self.input_events
pop = input_events.pop
post_dispatch_input = self.post_dispatch_input
while input_events:
post_dispatch_input(*pop(0))
def mainloop(self):
while not self.quit and self.status == 'started':
try:
self.idle()
if self.window:
self.window.mainloop()
except BaseException as inst:
# use exception manager first
r = ExceptionManager.handle_exception(inst)
if r == ExceptionManager.RAISE:
stopTouchApp()
raise
else:
pass
async def async_mainloop(self):
from kivy.base import ExceptionManager, stopTouchApp
while not self.quit and self.status == 'started':
try:
await self.async_idle()
if self.window:
self.window.mainloop()
except BaseException as inst:
# use exception manager first
r = ExceptionManager.handle_exception(inst)
if r == ExceptionManager.RAISE:
stopTouchApp()
raise
else:
pass
Logger.info("Window: exiting mainloop and closing.")
self.close()
def idle(self):
'''This function is called after every frame. By default:
* it "ticks" the clock to the next frame.
* it reads all input and dispatches events.
* it dispatches `on_update`, `on_draw` and `on_flip` events to the
window.
'''
# update dt
Clock.tick()
# read and dispatch input from providers
self.dispatch_input()
# flush all the canvas operation
Builder.sync()
# tick before draw
Clock.tick_draw()
# flush all the canvas operation
Builder.sync()
window = self.window
if window and window.canvas.needs_redraw:
window.dispatch('on_draw')
window.dispatch('on_flip')
# don't loop if we don't have listeners !
if len(self.event_listeners) == 0:
Logger.error('Base: No event listeners have been created')
Logger.error('Base: Application will leave')
self.exit()
return False
return self.quit
async def async_idle(self):
'''Identical to :meth:`idle`, but instead used when running
within an async event loop.
'''
# update dt
await Clock.async_tick()
# read and dispatch input from providers
self.dispatch_input()
# flush all the canvas operation
Builder.sync()
# tick before draw
Clock.tick_draw()
# flush all the canvas operation
Builder.sync()
window = self.window
if window and window.canvas.needs_redraw:
window.dispatch('on_draw')
window.dispatch('on_flip')
# don't loop if we don't have listeners !
if len(self.event_listeners) == 0:
Logger.error('Base: No event listeners have been created')
Logger.error('Base: Application will leave')
self.exit()
return False
return self.quit
def run(self):
'''Main loop'''
while not self.quit:
self.idle()
self.exit()
def exit(self):
'''Close the main loop and close the window.'''
self.close()
if self.window:
self.window.close()
def on_stop(self):
'''Event handler for `on_stop` events which will be fired right
after all input providers have been stopped.'''
pass
def on_pause(self):
'''Event handler for `on_pause` which will be fired when
the event loop is paused.'''
pass
def on_start(self):
'''Event handler for `on_start` which will be fired right
after all input providers have been started.'''
pass
#: EventLoop instance
EventLoop = EventLoopBase()
def _runTouchApp_prepare(widget=None, slave=False):
from kivy.input import MotionEventFactory, kivy_postproc_modules
    # Ok, we got one widget, and we are not in slave mode,
    # so the user didn't create the window; let's create it for them!
if widget:
EventLoop.ensure_window()
    # Instantiate all configured input providers
for key, value in Config.items('input'):
Logger.debug('Base: Create provider from %s' % (str(value)))
# split value
args = str(value).split(',', 1)
if len(args) == 1:
args.append('')
provider_id, args = args
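        # e.g. a config entry such as 'mouse = mouse' yields provider_id 'mouse'
        # with empty args, while 'mtdev_device = mtdev,/dev/input/event2' would
        # yield provider_id 'mtdev' and args '/dev/input/event2' (values shown
        # here are only illustrative).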
provider = MotionEventFactory.get(provider_id)
if provider is None:
Logger.warning('Base: Unknown <%s> provider' % str(provider_id))
continue
# create provider
p = provider(key, args)
if p:
EventLoop.add_input_provider(p, True)
# add postproc modules
for mod in list(kivy_postproc_modules.values()):
EventLoop.add_postproc_module(mod)
# add main widget
if widget and EventLoop.window:
if widget not in EventLoop.window.children:
EventLoop.window.add_widget(widget)
# start event loop
Logger.info('Base: Start application main loop')
EventLoop.start()
# remove presplash on the next frame
if platform == 'android':
Clock.schedule_once(EventLoop.remove_android_splash)
# in non-slave mode, there are 2 issues
#
# 1. if the user created a window, call the mainloop from the window.
#    This is due to glut: it needs to be called with
#    glutMainLoop(). Only FreeGLUT has a glutMainLoopEvent().
#    So, we are executing the dispatching function inside
#    a redisplay event.
#
# 2. if no window is created, we are dispatching the event loop
#    ourselves (previous behavior).
#
def runTouchApp(widget=None, slave=False):
'''Static main function that starts the application loop.
You can access some magic via the following arguments:
See :mod:`kivy.app` for example usage.
:Parameters:
`<empty>`
To make dispatching work, you need at least one
            input listener. If not, the application will leave.
            (MTWindow acts as an input listener)
`widget`
If you pass only a widget, a MTWindow will be created
and your widget will be added to the window as the root
widget.
`slave`
No event dispatching is done. This will be your job.
`widget + slave`
No event dispatching is done. This will be your job but
we try to get the window (must be created by you beforehand)
and add the widget to it. Very useful for embedding Kivy
            in another toolkit (like Qt, check kivy-designer).
'''
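    # Minimal usage sketch (illustrative only): given a Widget subclass MyRoot,
    #     runTouchApp(MyRoot())
    # creates the window, adds MyRoot to it and runs the loop; alternatively,
    # create the window yourself beforehand and call runTouchApp() with no
    # arguments.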
_runTouchApp_prepare(widget=widget, slave=slave)
# we are in a slave mode, don't do dispatching.
if slave:
return
try:
EventLoop.mainloop()
finally:
stopTouchApp()
async def async_runTouchApp(widget=None, slave=False, async_lib=None):
'''Identical to :func:`runTouchApp` but instead it is a coroutine
that can be run in an existing async event loop.
``async_lib`` is the async library to use. See :mod:`kivy.app` for details
and example usage.
.. versionadded:: 2.0.0
'''
if async_lib is not None:
Clock.init_async_lib(async_lib)
_runTouchApp_prepare(widget=widget, slave=slave)
# we are in a slave mode, don't do dispatching.
if slave:
return
try:
await EventLoop.async_mainloop()
finally:
stopTouchApp()
def stopTouchApp():
'''Stop the current application by leaving the main loop.
See :mod:`kivy.app` for example usage.
'''
if EventLoop is None:
return
if EventLoop.status in ('stopped', 'closed'):
return
if EventLoop.status != 'started':
if not EventLoop.stopping:
EventLoop.stopping = True
Clock.schedule_once(lambda dt: stopTouchApp(), 0)
return
Logger.info('Base: Leaving application in progress...')
EventLoop.close()
| inclement/kivy | kivy/base.py | Python | mit | 19,012 |
##Copyright 2011-2014 Thomas Paviot ([email protected])
##
##This file is part of pythonOCC.
##
##pythonOCC is free software: you can redistribute it and/or modify
##it under the terms of the GNU Lesser General Public License as published by
##the Free Software Foundation, either version 3 of the License, or
##(at your option) any later version.
##
##pythonOCC is distributed in the hope that it will be useful,
##but WITHOUT ANY WARRANTY; without even the implied warranty of
##MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
##GNU Lesser General Public License for more details.
##
##You should have received a copy of the GNU Lesser General Public License
##along with pythonOCC. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import webbrowser
from OCC.Visualization import Tesselator
import OCC
from time import time
import os
import tempfile
HEADER = """
<head>
<title>pythonOCC @VERSION@ webgl renderer</title>
<meta name='Author' content='Thomas Paviot - [email protected]'>
<meta name='Keywords' content='WebGl,pythonOCC'>
<meta charset="utf-8">
<style type="text/css">
body {
background-color: @background-color@;
margin: 0px;
overflow: hidden;
}
#info {
position: absolute;
top: 96%;
width: 96%;
color: #808080;
padding: 5px;
font-family: Monospace;
font-size: 13px;
text-align: right;
opacity: 1;
}
#pythonocc_rocks {
padding: 5px;
position: absolute;
left: 1%;
top: 85%;
height: 60px;
width: 305px;
border-radius: 5px;
border: 2px solid #f7941e;
opacity: 0.7;
font-family: Arial;
background-color: #414042;
color: #ffffff;
font-size: 16px;
opacity: 0.7;
}
a {
color: #f7941e;
text-decoration: none;
}
a:hover {
color: #ffffff;
}
</style>
</head>
"""
BODY = """
<body>
<div id="container"></div>
<div id="info">
WebGL engine by <a href="http://github.com/mrdoob/three.js" target="_blank">three.js</a>
</div>
<div id="pythonocc_rocks">
<b>pythonOCC @VERSION@ WebGL renderer</b><hr>
CAD in a browser
<a style="font-size:14px;" href=http://www.pythonocc.org>http://www.pythonocc.org</a>
</div>
<script type="text/javascript" src="@Three.jsPath@/three.min.js"></script>
<script type="text/javascript" src="@Three.jsPath@/OrbitControls.js"></script>
<script type="text/javascript" src="@Three.jsPath@/stats.min.js"></script>
@VertexShaderDefinition@
@FragmentShaderDefinition@
<script type="text/javascript" src="./shape.js"></script>
<script type="text/javascript">
var camera, scene, renderer, object, stats, container, shape_material;
var targetRotation = 0;
var targetRotationOnMouseDown = 0;
var targetRotationY = 0;
var targetRotationYOnMouseDown = 0;
var mouseX = 0;
var mouseXOnMouseDown = 0;
var mouseY = 0;
var mouseYOnMouseDown = 0;
var moveForward = false;
var moveBackward = false;
var moveLeft = false;
var moveRight = false;
var moveUp = false;
var moveDown = false;
var windowHalfX = window.innerWidth / 2;
var windowHalfY = window.innerHeight / 2;
init();
animate();
function init() {
container = document.createElement( 'div' );
document.body.appendChild( container );
camera = new THREE.PerspectiveCamera( 50, window.innerWidth / window.innerHeight, 1, 200 );
camera.position.z = 100;
controls = new THREE.OrbitControls( camera );
scene = new THREE.Scene();
scene.add( new THREE.AmbientLight(0x101010));
directionalLight = new THREE.DirectionalLight( 0xffffff );
directionalLight.position.x = 1;
directionalLight.position.y = 1;
directionalLight.position.z = 2;
directionalLight.position.normalize();
scene.add( directionalLight );
light1 = new THREE.PointLight( 0xffffff );
scene.add( light1 );
@Uniforms@
@ShaderMaterialDefinition@
phong_material = new THREE.MeshPhongMaterial( { ambient: 0x000000,
color: 0xffaa00,
specular: 0x555555,
shininess: 30 });
object = new THREE.Mesh( new Shape(), @ShapeMaterial@);
object.overdraw = true;
object.rotation.x = -1.57/2;
scene.add( object );
renderer = new THREE.WebGLRenderer({antialias:true});
renderer.setClearColor("@background-color@");
renderer.setSize( window.innerWidth, window.innerHeight );
container.appendChild( renderer.domElement );
renderer.shadowMapEnabled = true;
renderer.shadowMapType = THREE.PCFShadowMap;
stats = new Stats();
stats.domElement.style.position = 'absolute';
stats.domElement.style.top = '0px';
container.appendChild( stats.domElement );
window.addEventListener( 'resize', onWindowResize, false );
}
function animate() {
requestAnimationFrame( animate );
controls.update();
render();
stats.update();
}
function render() {
@IncrementTime@
renderer.render( scene, camera );
}
function onWindowResize() {
camera.aspect = window.innerWidth / window.innerHeight;
camera.updateProjectionMatrix();
renderer.setSize( window.innerWidth, window.innerHeight );
}
</script>
</body>
"""
class HTMLHeader(object):
def __init__(self, background_color='#000000'):
self._background_color = background_color
def get_str(self):
header_str = HEADER.replace('@background-color@', '%s' % self._background_color)
header_str = header_str.replace('@VERSION@', OCC.VERSION)
return header_str
class HTMLBody(object):
def __init__(self, background_color='#000000', vertex_shader=None,
fragment_shader=None, uniforms=None):
self._background_color = background_color
self._vertex_shader = vertex_shader
self._fragment_shader = fragment_shader
self._uniforms = uniforms
def get_str(self):
# get the location where pythonocc is running from
threejs_build_location = os.sep.join([OCC.__path__[0], 'Display', 'WebGl', 'js'])
body_str = BODY.replace('@Three.jsPath@', '%s' % threejs_build_location)
body_str = body_str.replace('@background-color@', '%s' % self._background_color)
body_str = body_str.replace('@VERSION@', OCC.VERSION)
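        # A custom GLSL ShaderMaterial is only emitted when both a vertex and a
        # fragment shader were supplied; otherwise the default phong material is
        # used (see the @ShapeMaterial@ substitution in both branches below).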
        if (self._vertex_shader is not None) and (self._fragment_shader is not None):
vertex_shader_string_definition = '<script type="x-shader/x-vertex" id="vertexShader">%s</script>' % self._vertex_shader
fragment_shader_string_definition = '<script type="x-shader/x-fragment" id="fragmentShader">%s</script>' % self._fragment_shader
shader_material_definition = """
var vertexShader = document.getElementById( 'vertexShader' ).textContent;
var fragmentShader = document.getElementById( 'fragmentShader' ).textContent;
var shader_material = new THREE.ShaderMaterial( { uniforms: uniforms,
vertexShader: vertexShader,
fragmentShader: fragmentShader } );
"""
if self._uniforms is None:
body_str = body_str.replace('@Uniforms@', 'uniforms ={};\n')
body_str = body_str.replace('@IncrementTime@', '')
else:
body_str = body_str.replace('@Uniforms@', self._uniforms)
if 'time' in self._uniforms:
body_str = body_str.replace('@IncrementTime@', 'uniforms.time.value += 0.05;')
else:
body_str = body_str.replace('@IncrementTime@', '')
body_str = body_str.replace('@VertexShaderDefinition@', vertex_shader_string_definition)
body_str = body_str.replace('@FragmentShaderDefinition@', fragment_shader_string_definition)
body_str = body_str.replace('@ShaderMaterialDefinition@', shader_material_definition)
body_str = body_str.replace('@ShapeMaterial@', 'shader_material')
else:
body_str = body_str.replace('@Uniforms@', '')
body_str = body_str.replace('@VertexShaderDefinition@', '')
body_str = body_str.replace('@FragmentShaderDefinition@', '')
body_str = body_str.replace('@ShaderMaterialDefinition@', '')
body_str = body_str.replace('@ShapeMaterial@', 'phong_material')
body_str = body_str.replace('@IncrementTime@', '')
return body_str
class ThreejsRenderer(object):
def __init__(self, background_color="#123345", vertex_shader=None, fragment_shader=None, uniforms=None, path=None):
if not path:
self._path = tempfile.mkdtemp()
else:
self._path = path
self._js_filename = os.path.join(self._path, "shape.js")
self._html_filename = os.path.join(self._path, "webgl_topods_shape.html" )
self._background_color = background_color
self._vertex_shader = vertex_shader
self._fragment_shader = fragment_shader
self._uniforms = uniforms
def set_vertex_shader(self, vertex_shader):
''' adds a vertex shader definition '''
self._vertex_shader = vertex_shader
def set_fragment_shader(self, fragment_shader):
''' adds a fragment shader '''
self._fragment_shader = fragment_shader
def create_files(self, shape):
''' generate .js and .html files '''
self._shape = shape
print("Tesselate shape ...")
t0 = time()
tess = Tesselator(self._shape)
t1 = time()
print("done in %f s." % (t1-t0))
print("Exporting tesselation to JSON ...")
t2 = time()
tess.ExportShapeToThreejs(self._js_filename)
t3 = time()
print("done in %f s." % (t3-t2))
print("Generating HTML stream ...")
self.GenerateHTMLFile()
print("done.")
return self._js_filename, self._html_filename
def DisplayShape(self, shape):
self.create_files(shape)
print("Opening html output in the default webbrowser ...")
        # the previous version used an os.system call to the "open" command
# but this is a platform (osx) specific solution
_path = "file:///{0}".format(os.path.join(os.getcwd(), self._html_filename))
webbrowser.open_new_tab(_path)
def GenerateHTMLFile(self):
""" Generate the HTML file to be rendered wy the web browser
"""
fp = open(self._html_filename, "w")
fp.write("<!DOCTYPE HTML>")
fp.write('<html lang="en">')
# header
fp.write(HTMLHeader(self._background_color).get_str())
# body
fp.write(HTMLBody(self._background_color,
self._vertex_shader,
self._fragment_shader,
self._uniforms).get_str())
fp.write("</html>\n")
fp.close()
if __name__ == "__main__":
from OCC.BRepPrimAPI import BRepPrimAPI_MakeBox
box_shp = BRepPrimAPI_MakeBox(10., 20., 30.).Shape()
my_ren = ThreejsRenderer()
my_ren.DisplayShape(box_shp)
| sven-hm/pythonocc-core | src/addons/Display/WebGl/threejs_renderer.py | Python | lgpl-3.0 | 12,135 |
from .SignalDataCanvas import SignalDataCanvas
from .SignalDataCanvas import SignalDataCanvasFast
| peace098beat/pyside_cookbook | 10_MatplotlibVSPygraph/mplcanvas/__init__.py | Python | gpl-3.0 | 104 |
import logging
from django.conf import settings
from django.core.management.base import NoArgsCommand
from django.db import connection
from mailer.engine import send_all
# allow a sysadmin to pause the sending of mail temporarily.
PAUSE_SEND = getattr(settings, "MAILER_PAUSE_SEND", False)
class Command(NoArgsCommand):
help = "Do one pass through the mail queue, attempting to send all mail."
def handle_noargs(self, **options):
logging.basicConfig(level=logging.DEBUG, format="%(message)s")
logging.info("-" * 72)
# if PAUSE_SEND is turned on don't do anything.
if not PAUSE_SEND:
send_all()
connection.close()
else:
logging.info("sending is paused, quitting.")
| richardbarran/django-mailer | mailer/management/commands/send_mail.py | Python | mit | 759 |
from ..errors import *
from .core import BoundObject, Type
from .stats import ScopeStats
class VoidType(Type, BoundObject):
def __init__(self, name:str = ""):
BoundObject.__init__(self, name)
def verify(self):
self._stats = ScopeStats(self.parent)
self.stats.static = True
self.stats.forward = False
def resolveType(self):
raise InternalError("Void is typeless")
@property
def local_context(self):
raise InternalError("Void does not have a local context")
def checkCompatibility(self, other:Type, check_cache = None):
# Void is compatible with all
return True
def __repr__(self):
return "{}<{}>".format(self.__class__.__name__, self.name)
| CameronLonsdale/jam | compiler/lekvar/void_type.py | Python | mit | 746 |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005 Ole André Vadla Ravnås <[email protected]>
# Copyright (C) 2006-2007 Ali Sabil <[email protected]>
# Copyright (C) 2007 Johann Prieur <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
from papyon.gnet.constants import *
from papyon.gnet.proxy.proxyfiable import ProxyfiableClient
from sock import SocketClient
import gobject
__all__ = ['TCPClient']
class TCPClient(SocketClient, ProxyfiableClient):
"""Asynchronous TCP client class.
@sort: __init__, open, send, close
@undocumented: do_*, _watch_*, __io_*, _connect_done_handler
@since: 0.1"""
def __init__(self, host, port):
"""initializer
@param host: the hostname to connect to.
@type host: string
@param port: the port number to connect to.
@type port: integer > 0 and < 65536"""
SocketClient.__init__(self, host, port, AF_INET, SOCK_STREAM)
ProxyfiableClient.__init__(self)
gobject.type_register(TCPClient)
| Kjir/papyon | papyon/gnet/io/tcp.py | Python | gpl-2.0 | 1,708 |
#! /usr/bin/python
'''
@author: Alister Maguire
Given a counts file and a taxa file, condense
repeated genera and their counts, and output
a file that maps genus names to their counts
for each experiment.
'''
import argparse
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("counts_file")
parser.add_argument("taxa_file")
args = parser.parse_args()
taxa_f = open(args.taxa_file, "r")
counts_f = open(args.counts_file, "r")
condensed_counts = []
genus_dct = {}
genus_lst = []
count_lst = []
count_dct = {}
taxa = taxa_f.readlines()
counts = counts_f.readlines()
for c in counts:
count_lst.append(c.split())
#create a dictionary that associates
#experiment IDs with lists for counts
c_size = len(counts)
for i in range(1, c_size):
count_dct[count_lst[i][0]] = []
#retrieve the genus names and their
#associated OTU values (look for repeats)
for i in range(len(taxa)):
taxa[i] = taxa[i].split()
j = -3
genus = taxa[i][j]
j -= 1
#condense genus names that have been
#split into pieces
while not is_number(taxa[i][j]):
genus = taxa[i][j] + " " + genus
j -= 1
#if genus in exempt:
# continue
if genus not in genus_dct:
genus_dct[genus] = []
genus_dct[genus].append(taxa[i][0])
genus_lst.append(genus)
g_size = len(genus_lst)
#create a list for condensed counts
#that we can use to map genus' with their counts
for i in range(1, len(count_lst)):
condensed_counts.append([])
condensed_counts[i-1] = ([0]*(g_size+1))
for i in range(0, g_size):
for j in range(1, len(count_lst)):
total = 0
for otu in genus_dct[genus_lst[i]]:
#the otu number is an index into the counts list
idx = int(otu[3:]) + 1
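                # otu[3:] assumes identifiers of the form 'OTU<number>'
                # (e.g. 'OTU12' -> 12); the +1 skips the experiment-ID column
                # of the counts row.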
total += int(count_lst[j][idx])
condensed_counts[j-1][0] = count_lst[j][0]
condensed_counts[j-1][i] = total
genus_counts_f = open("condensed_counts.txt", "w+")
    #Write the new file that associates genus names
#with experiment counts. The first line of the
#file contains all of the genus names, and the position
#of this name is an index into the experiment counts.
#The following lines are of the form
# Experiment_ID, count0, count1, ...., countn
#
genus_keys = ""
for genus in genus_lst:
genus_keys = genus_keys + ", " + genus
genus_keys = genus_keys[2:] + "\n"
genus_counts_f.write(genus_keys)
for row in condensed_counts:
exp_counts = ""
for col in row:
exp_counts = exp_counts + ", " + str(col)
exp_counts = exp_counts[2:] + "\n"
genus_counts_f.write(exp_counts)
genus_counts_f.close()
taxa_f.close()
counts_f.close()
| aowen87/PhyloViewer | src/condense.py | Python | gpl-3.0 | 3,122 |
# Copyright 2016 The Switch Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
"""
This module defines hydroelectric system components. It creates a hydraulic
system that works in parallel with the electric one. They are linked through
the power generation process at hydroelectric generators. The module builds
on top of generic generators, adding components linking power generation
with water use and availability. It requires the specification of the
water system topology, such as water nodes, reservoirs, water connections
and hydroelectric projects.
The hydraulic system is expected to be operational throughout the whole
time horizon of the simulation.
The water network is a graph composed of nodes and connections. Nodes
represent rivers, lakes or reservoirs, while connections represent flows
between nodes where generating stations may be located. All nodes can have
inflows and consumption that are independent of the hydrological network and
are specified with external data. All connections can control flow between
nodes, potentially limited by minimum flow constraints, or dictated by
geological filtration. All flow is currently downstream, but pumped hydro may
be implemented at a later date. Sink nodes have the ability to spill outside
of the hydraulic system. Reservoir nodes track their water levels during
investment periods, and have their levels externally determined at the
beginning and end of investment periods.
"""
import os
from pyomo.environ import *
def define_components(mod):
"""
WATER_NODES is the set of nodes of the water system that do not have
storage capacity. These usually represent confluence and/or divergence
of different water flows. Members of this set can be abbreviated as
wn or wnode.
WATER_NODES_BALANCE_POINTS is a set showing all the combinations of
water nodes and timepoints, in which the conservation of mass law
must be enforced. For now it is initialized as the cross product of
    the WATER_NODES and TIMEPOINTS sets, but it should be made more flexible
to allow for addition and removal of water nodes in intermediate
timepoints of the simulation horizon.
WATER_SINKS_BALANCE_POINTS is a set showing all the combinations of
water sinks and timepoints, in which water "spilling" is allowed when
enforcing the conservation of mass law. They usually represent water
intakes in a river (where water that is not extracted just keeps on
flowing through the river) and actual sinks, such as an ocean or
lake (or any point after which the modeling of the hydraulic system
is irrelevant for the power system).
wnode_constant_inflow[wn] is the value of constant inflow of
water at each node of the hydraulic system throughout the whole
simulation. Inflow refers to an external source of water that comes
into the system at the water node, such as rainfall. Water flows
that originate from an upstream model component, such as another water
node or a reservoir, are decided by the model and so must not be
specified here. This parameter is specified in cubic meters per second
(cumec) and defaults to 0.
wnode_constant_consumption[wn] is the value of constant
consumption of water at each node of the hydraulic system throughout
the whole simulation. Consumption refers to any activity that takes
water out of the modeled hydraulic system, such as crop irrigation,
human and animal consumption, minimum ecological flow for a sink
node, etc. This parameter is specified in cubic meters per second
(cumec) and defaults to 0.
wnode_tp_inflow[wn, t] and wnode_tp_consumption[wn, t]
are the values of water inflow and consumption at each node of the
hydraulic system specified at each timepoint. These are optional
parameters that default to wnode_constant_inflow_cumec and
wnode_constant_consumption_cumec. Depending on data availability,
    these parameters may be used to represent different phenomena. For
    example, the Chilean datasets specify water inflows due to rainfall
and melting snows at different nodes in a weekly basis. So, all
simulated timepoints that belong to the same week will have the same
wnode_tp_inflow_cumec parameter specified for each water node.
wn_is_sink[WATER_NODES] is a binary flag indicating whether a water
node is a sink. These nodes need not obey the law of conservation of
mass, so that water flows that go into them may be greater than the
ones that flow out. The main use case for these is to be the end of a
    water basin (such as the ocean or a lake).
node_spillage_cost[WATER_NODES] is a derived parameter that sets
the cost in US$/(cubic meters) of spilling water out of the water
network. This is equivalent to disobeying the conservation of mass
law when balancing flows in each node and timepoint, so cost is
set to a high default value. This parameter lets the model spill
water freely in sink nodes, but relaxes the equality constraint
for mass balance. This aids the solver into obtaining optimal
solutions significantly faster and with small water spillages.
NodeSpillage[WATER_NODES_BALANCE_POINTS] are the decisions of
water spillage out of the water network at each node and timepoint
in cubic meters per second.
RESERVOIRS is a subset of water nodes that are reservoirs. These
require additional characterization. Members of this set may be
abbreviated as r or res.
res_min_vol[r] is a parameter that specifies the minimum storage
capacity of the reservoir in millions of cubic meters. Usually
this will be a positive value, since reservoirs cannot be
completely emptied because of physical limitations, but it is
    allowed to be 0 in case relative volumes are to be used.
res_max_vol[r] is a parameter that specifies the maximum storage
capacity of the reservoir in millions of cubic meters. If at any
timepoint the volume of water in the reservoir reaches this limit,
spillage may occur to mantain the mass balance. This parameter is
determined by the physical characteristics of the reservoir.
RESERVOIRS_BALANCE_POINTS is a set showing all the combinations of
reservoirs and timepoints, in which the conservation of mass law
must be enforced. For now it is initialized as the cross product of
    the RESERVOIRS and TIMEPOINTS sets, but it should be made more flexible
to allow for addition and removal of reservoirs in intermediate
timepoints of the simulation horizon.
res_min_vol_tp[r, t] and res_max_vol_tp[r, t] are the
values of allowable minimum and maximum water volume at each
reservoir specified at each timepoint. These may be used to represent
    seasonal restrictions in water levels at any reservoir. For example,
minimum volumes of water must be kept during summer at some reservoirs
to allow for leisure and tourism activities, such as water sports.
These parameters are optional and must be specified in cubic meters
and default to reservoir_min_vol and reservoir_max_vol.
initial_res_vol[r] is a parameter that states the starting volume
of stored water in each reservoir in millions of cubic meters. The
same value will be used as a starting point in each period of the
simulation, independent of which was the final level at the last
timepoint of the previous period. This methodology has been used
in several expansion planning papers that include reservoir hydro
power plants, because it allows decoupling the operational
subproblems of each period and thus speeding up the optimization
considerably.
final_res_vol[r] is a parameter that states the final volume of
stored water in each reservoir in millions of cubic meters. This
level is enforced as a minimum for the final volume. Usually, this
parameter is specified to be the same as the initial volume, so
that the reservoir may only arbitrage with the water inflows that
come into it during the period.
ReservoirVol[r, t] is a variable that tracks the volume of water
    at each reservoir at the beginning of each timepoint, specified in
    millions of cubic meters. This variable is determined by the volume
    in the previous timepoint, the inflows and the outflows.
ReservoirSurplus[r, p] is the amount of water in the reservoir at the end
of each period in excess of final_res_vol[r].
WATER_CONNECTIONS is the set of flows that begin and end in different
water bodies, such as reservoirs and nodes. The model decides how much
water is "dispatched" through each connection at each timepoint. Water
may only flow in one direction, so "to" and "from" parameters must be
    provided. Members of this set may be abbreviated by wc or wcon.
WCONS_DISPATCH_POINTS is the set of the cross product between
    TIMEPOINTS and WATER_CONNECTIONS. In the future, this should be made
    more flexible to allow for new water connections to be created within
the simulation horizon (as with WATER_NODES_BALANCE_POINTS and
RESERVOIRS_BALANCE_POINTS).
water_node_from[wc] is a parameter that specifies the water body from
which the connection extracts water.
water_node_to[wc] is a parameter that specifies the water body to which
the connection injects water.
wc_capacity[wc] is a parameter that specifies the limit, in cubic
meters per second, of the water flow through the connection. This
datum is difficult to find, but could be relevant in some cases where
rivers or streams have a defined capacity and greater flows could
    cause them to collapse and/or flood the surrounding area. Defaults
    to infinity (no limit).
min_eco_flow[wc, t] is a parameter that indicates the minimum ecological
water flow that must be dispatched through each water connection at each
timepoint, specified in cubic meters per second. The parameter is
indexed by timepoint to allow for representation of seasonal or hourly
ecological or social constraints. This is an optional parameter that
defaults to 0.
DispatchWater[wc, t] is a variable that represents how much water is
flowing through each water connection at each timepoint. The lower bound is
m.min_eco_flow[wc, t] and the upper bound is m.wc_capacity[wc].
Enforce_Wnode_Balance[(wn, t) for (wn, t) in WATER_NODES_BALANCE_POINTS]
is a constraint that enforces conservation of mass at water nodes. This
accounts for any spills at sink nodes, or any change in reservoir volume
between one timepoint and the next. This also links the reservoir volumes
between timepoints, and enforces the final reservoir volume constraint.
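    Concretely, for each (wn, t) the rule requires that
    wnode_tp_inflow[wn, t] plus the sum of DispatchWater flows directed
    into the node equals wnode_tp_consumption[wn, t] plus the sum of
    DispatchWater flows directed out of the node, plus NodeSpillage[wn, t]
    and, for reservoirs, the change in stored volume between consecutive
    timepoints expressed as an average flow in cumec.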
HYDRO_PROJECTS is a subset of PROJECTS which are to be linked with the
hydraulic system. Both reservoir generators as well as hydroelectric
projects in series must be specified as HYDRO_PROJECTS and will be
treated the same. Members of this set may be abbreviated as hproj.
HYDRO_PROJ_DISPATCH_POINTS is a subset of PROJ_DISPATCH_POINTS only with
projects that belong to the HYDRO_PROJECTS set. This set is used to
index the electricity generation decisions.
hydro_efficiency[hproj] is a parameter that specifies the hydraulic
efficiency of a project, in units of MW/(cubic meters per second).
The amount of power generated by a hydroelectric generator with a
    certain flow depends on the water head. This creates a nonlinear
    relationship between the generated power per water flow and the volume
    of stored water. In this module the efficiency is assumed to be a
    constant for each project, to keep the problem linear.
hydraulic_location[hproj] is a parameter that specifies the water
connection in which each hydro project is located. Multiple projects
may be located at the same connection, which allows modeling of
cascading generation.
TurbinatedFlow[hproj, t] is a variable that represents the water flow,
in cubic meters per second, that is passed through the turbines of each
project at each timepoint. This is the flow that is used to generate
electricity.
SpilledFlow[hproj, t] is a variable that represents the water flow,
in cubic meters per second, that is spilled by each project at each
timepoint. All spilled water is considered to be returned to the same
water connection from which it was originally extracted.
Enforce_Hydro_Generation[hproj, t] is the constraint that forces power
generation at each hydro project to be equal to the flow of water that
goes through its turbines, times its hydro efficiency. This relation
is observed at each timepoint.
    Enforce_Hydro_Extraction[hproj, t] is the constraint that maintains the
    conservation of mass at each project's water extraction point, so that
    the sum of the flow that goes through its turbines and the flow that is
    spilled equals the water that is flowing at each timepoint through
    the water connection where it is located.
-----
TODO:
-Implement pumped storage
    -Allow setting the water spillage cost as a parameter. The default
    of 10000 US$/cumec should prevent significant water spillage in
    non-sink nodes in most cases. Nonetheless, some users could want to
    lower the penalties for some nodes in order to get faster solution
    times, and others could want to raise them to avoid spilling completely.
"""
#################
# Nodes of the water network
mod.WATER_NODES = Set()
mod.WATER_NODES_BALANCE_POINTS = Set(
dimen=2,
initialize=lambda m: m.WATER_NODES * m.TIMEPOINTS)
mod.wnode_constant_inflow = Param(
mod.WATER_NODES,
within=NonNegativeReals,
default=0.0)
mod.wnode_constant_consumption = Param(
mod.WATER_NODES,
within=NonNegativeReals,
default=0.0)
mod.wnode_tp_inflow = Param(
mod.WATER_NODES_BALANCE_POINTS,
within=NonNegativeReals,
default=lambda m, wn, t: m.wnode_constant_inflow[wn])
mod.wnode_tp_consumption = Param(
mod.WATER_NODES_BALANCE_POINTS,
within=NonNegativeReals,
default=lambda m, wn, t: m.wnode_constant_consumption[wn])
mod.wn_is_sink = Param(
mod.WATER_NODES,
within=Boolean)
mod.min_data_check('wn_is_sink')
mod.node_spillage_cost = Param(
mod.WATER_NODES,
within=NonNegativeReals,
initialize=lambda m, wn: (1 - m.wn_is_sink[wn]) * 10000)
mod.NodeSpillage = Var(
mod.WATER_NODES_BALANCE_POINTS,
within=NonNegativeReals)
#################
# Reservoir nodes
mod.RESERVOIRS = Set(
within=mod.WATER_NODES)
mod.RESERVOIRS_BALANCE_POINTS = Set(
dimen=2,
initialize=lambda m: m.RESERVOIRS * m.TIMEPOINTS)
mod.res_min_vol = Param(
mod.RESERVOIRS,
within=NonNegativeReals)
mod.res_max_vol = Param(
mod.RESERVOIRS,
within=PositiveReals,
validate=lambda m, val, r: val >= m.res_min_vol[r])
mod.res_min_vol_tp = Param(
mod.RESERVOIRS_BALANCE_POINTS,
within=NonNegativeReals,
default=lambda m, r, t: m.res_min_vol[r])
mod.res_max_vol_tp = Param(
mod.RESERVOIRS_BALANCE_POINTS,
within=NonNegativeReals,
default=lambda m, r, t: m.res_max_vol[r])
mod.initial_res_vol = Param(
mod.RESERVOIRS,
within=NonNegativeReals,
validate=lambda m, val, r: (
m.res_min_vol[r] <= val <= m.res_max_vol[r]))
mod.final_res_vol = Param(
mod.RESERVOIRS,
within=NonNegativeReals,
validate=lambda m, val, r: (
m.res_min_vol[r] <= val <= m.res_max_vol[r]))
mod.min_data_check('res_min_vol', 'res_max_vol', 'initial_res_vol',
'final_res_vol')
def ReservoirVol_bounds(m, r, t):
# In the first timepoint of each period, this is externally defined
if t == m.PERIOD_TPS[m.tp_period[t]].first():
return(m.initial_res_vol[r], m.initial_res_vol[r])
# In all other timepoints, this is constrained by min & max params
else:
return(m.res_min_vol[r], m.res_max_vol[r])
mod.ReservoirVol = Var(
mod.RESERVOIRS_BALANCE_POINTS,
within=NonNegativeReals,
bounds=ReservoirVol_bounds)
mod.ReservoirSurplus = Var(
mod.RESERVOIRS, mod.PERIODS,
within=NonNegativeReals)
################
# Edges of the water network
mod.WATER_CONNECTIONS = Set()
mod.WCONS_DISPATCH_POINTS = Set(
dimen=2,
initialize=lambda m: m.WATER_CONNECTIONS * m.TIMEPOINTS)
mod.water_node_from = Param(
mod.WATER_CONNECTIONS,
within=mod.WATER_NODES)
mod.water_node_to = Param(
mod.WATER_CONNECTIONS,
within=mod.WATER_NODES)
mod.wc_capacity = Param(
mod.WATER_CONNECTIONS,
within=PositiveReals,
default=float('inf'))
mod.min_eco_flow = Param(
mod.WCONS_DISPATCH_POINTS,
within=NonNegativeReals,
default=0.0)
mod.min_data_check('water_node_from', 'water_node_to')
mod.CONNECTIONS_DIRECTED_INTO_WN = Set(
mod.WATER_NODES,
initialize=lambda m, wn: set(wc for wc in m.WATER_CONNECTIONS
if m.water_node_to[wc] == wn))
mod.CONNECTIONS_DIRECTED_OUT_OF_WN = Set(
mod.WATER_NODES,
initialize=lambda m, wn: set(wc for wc in m.WATER_CONNECTIONS
if m.water_node_from[wc] == wn))
mod.DispatchWater = Var(
mod.WCONS_DISPATCH_POINTS,
within=NonNegativeReals,
bounds=lambda m, wc, t: (m.min_eco_flow[wc, t], m.wc_capacity[wc]))
def Enforce_Wnode_Balance_rule(m, wn, t):
# Sum inflows and outflows from and to other nodes
dispatch_inflow = sum(m.DispatchWater[wc, t]
for wc in m.CONNECTIONS_DIRECTED_INTO_WN[wn])
dispatch_outflow = sum(m.DispatchWater[wc, t]
for wc in m.CONNECTIONS_DIRECTED_OUT_OF_WN[wn])
# Reservoir flows: 0 for non-reservoirs
reservoir_fill_rate = 0.0
if wn in m.RESERVOIRS:
p = m.tp_period[t]
end_volume = 0.0
if t != m.PERIOD_TPS[p].last():
t_next = m.PERIOD_TPS[p].next(t)
end_volume = m.ReservoirVol[wn, t_next]
else:
end_volume = m.final_res_vol[wn] + m.ReservoirSurplus[wn, p]
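            # Convert the change in stored volume over this timepoint from
            # millions of cubic meters into an average flow in cubic meters
            # per second.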
reservoir_fill_rate = (
(end_volume - m.ReservoirVol[wn, t]) * 1000000.0 /
(m.tp_duration_hrs[t] * 3600))
# Conservation of mass flow
return (
            m.wnode_tp_inflow[wn, t] + dispatch_inflow ==
            m.wnode_tp_consumption[wn, t] + dispatch_outflow
+ m.NodeSpillage[wn, t] + reservoir_fill_rate)
mod.Enforce_Wnode_Balance = Constraint(
mod.WATER_NODES_BALANCE_POINTS,
rule=Enforce_Wnode_Balance_rule)
mod.Nodes_Spillage_Costs = Expression(
mod.TIMEPOINTS,
rule=lambda m, t: sum(m.NodeSpillage[wn,t] * m.node_spillage_cost[wn]
for wn in m.WATER_NODES))
mod.cost_components_tp.append('Nodes_Spillage_Costs')
################
# Hydro projects
mod.HYDRO_PROJECTS = Set(
validate=lambda m, val: val in m.PROJECTS)
mod.HYDRO_PROJ_DISPATCH_POINTS = Set(
initialize=mod.PROJ_DISPATCH_POINTS,
filter=lambda m, proj, t: proj in m.HYDRO_PROJECTS)
mod.hydro_efficiency = Param(
mod.HYDRO_PROJECTS,
within=PositiveReals,
validate=lambda m, val, proj: val <= 10)
mod.hydraulic_location = Param(
mod.HYDRO_PROJECTS,
validate=lambda m, val, proj: val in m.WATER_CONNECTIONS)
mod.TurbinatedFlow = Var(
mod.HYDRO_PROJ_DISPATCH_POINTS,
within=NonNegativeReals)
mod.SpilledFlow = Var(
mod.HYDRO_PROJ_DISPATCH_POINTS,
within=NonNegativeReals)
mod.Enforce_Hydro_Generation = Constraint(
mod.HYDRO_PROJ_DISPATCH_POINTS,
rule=lambda m, proj, t: (m.DispatchProj[proj, t] ==
m.hydro_efficiency[proj] * m.TurbinatedFlow[proj, t]))
mod.Enforce_Hydro_Extraction = Constraint(
mod.HYDRO_PROJ_DISPATCH_POINTS,
rule=lambda m, proj, t: (m.TurbinatedFlow[proj, t] +
m.SpilledFlow[proj, t] ==
m.DispatchWater[m.hydraulic_location[proj], t]))
def load_inputs(mod, switch_data, inputs_dir):
"""
Import hydro data to model hydroelectric projects in reservoirs and
in series.
The files water_nodes.tab, reservoirs.tab, water_connections.tab and
hydro_projects.tab are mandatory, since they specify the hydraulic
system's topology and basic characterization.
    Files water_node_tp_flows.tab, reservoir_tp_data.tab and min_eco_flows.tab
    are optional, since they specify information on a timepoint basis that
    has constant values to default to.
Run-of-River hydro projects should not be included in this file; RoR
hydro is treated like any other variable renewable resource, and
expects data in variable_capacity_factors.tab.
"""
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'water_nodes.tab'),
auto_select=True,
index=mod.WATER_NODES,
optional_params=['mod.wnode_constant_inflow',
'mod.wnode_constant_consumption'],
param=(mod.wn_is_sink, mod.wnode_constant_inflow,
mod.wnode_constant_consumption))
switch_data.load_aug(
optional=True,
filename=os.path.join(inputs_dir, 'water_node_tp_flows.tab'),
auto_select=True,
optional_params=['mod.wnode_tp_inflow', 'mod.wnode_tp_consumption'],
param=(mod.wnode_tp_inflow, mod.wnode_tp_consumption))
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'reservoirs.tab'),
auto_select=True,
index=mod.RESERVOIRS,
param=(mod.res_min_vol, mod.res_max_vol,
mod.initial_res_vol, mod.final_res_vol))
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'reservoir_tp_data.tab'),
optional=True,
auto_select=True,
optional_params=['mod.res_max_vol_tp', 'mod.res_min_vol_tp'],
param=(mod.res_max_vol_tp, mod.res_min_vol_tp))
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'water_connections.tab'),
auto_select=True,
index=mod.WATER_CONNECTIONS,
param=(mod.water_node_from, mod.water_node_to, mod.wc_capacity))
switch_data.load_aug(
optional=True,
filename=os.path.join(inputs_dir, 'min_eco_flows.tab'),
auto_select=True,
param=(mod.min_eco_flow))
switch_data.load_aug(
filename=os.path.join(inputs_dir, 'hydro_projects.tab'),
auto_select=True,
index=mod.HYDRO_PROJECTS,
param=(mod.hydro_efficiency, mod.hydraulic_location))
| bmaluenda/switch | switch_mod/generators/hydro_system.py | Python | apache-2.0 | 23,261 |
""" This module sets up django environment so that other scripts need not worry about these mundane tasks.
"""
def main():
try:
from pysis.settings import settings
except ImportError:
try:
from pysis import settings
except ImportError:
import sys
sys.stderr.write('Can not find settings module. \n Aborting...')
sys.exit(1)
from django.core.management import setup_environ
setup_environ(settings)
if __name__ == '__main__':
main()
| sramana/pysis | scripts/bootstrap_django.py | Python | unlicense | 527 |
"""Library generic utils."""
| lucasrodes/whatstk | whatstk/utils/__init__.py | Python | gpl-3.0 | 29 |
#!/usr/bin/python3
import os
import glob
import RPi.GPIO as GPIO
import time
import datetime
GPIO.setmode(GPIO.BCM) # set board mode to broadcom
GPIO.setwarnings(False)
#GPIO.setup(6, GPIO.OUT) # set up pins for output mode
#GPIO.setup(6, GPIO.HIGH) # set pin states to high
#GPIO.setup(13, GPIO.OUT) # set up pin 13 for output
#GPIO.setup(13, GPIO.HIGH)
#GPIO.setup(19, GPIO.OUT) # set up pin 19 for output
#GPIO.setup(19, GPIO.HIGH)
#GPIO.setup(26, GPIO.OUT) #set up pin 26 for output
#GPIO.setup(26, GPIO.HIGH)
pinList = [6, 13, 19, 26]
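# The relays are driven active-low: HIGH keeps a relay de-energized and LOW
# turns it on (see the thermostat loop below), so all pins start out HIGH.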
for i in pinList:
GPIO.setup(i, GPIO.OUT)
GPIO.output(i, GPIO.HIGH)
os.system('modprobe w1-gpio')
os.system('modprobe w1-therm')
base_dir = '/sys/bus/w1/devices/'
device_folder = glob.glob(base_dir + '28*')
device_file = [folder + '/w1_slave' for folder in device_folder]
def read_temp_raw() :
f = open(device_file[0], 'r')
lines = f.readlines()
f.close()
return lines
def read_temp() :
lines = read_temp_raw()
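    # The first line of w1_slave ends with 'YES' once the sensor's CRC check
    # passes; keep re-reading until a valid measurement is available.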
while lines[0].strip()[-3:] != 'YES': #Checking for valid temperature reading. If none found, wait and try again.
time.sleep(0.2)
lines = read_temp_raw()
equals_pos = lines[1].find('t=')
if equals_pos != -1:
temp_string = lines[1][equals_pos+2:] #finds temp in raw input
temp_c = float(temp_string) / 1000.0 #convert raw input to *C
        temp_f = temp_c * 9.0 / 5.0 + 32.0 # convert to *F
return temp_f
try:
while True:
        temp_f = read_temp()
        print(temp_f)
        if temp_f < 70.0: ###! Change this number to set thermostat !###
GPIO.output(6, GPIO.LOW) # turn on relay 1 when temp < temp set above (* F)
else:
GPIO.output(6, GPIO.HIGH) # turn off relay 1 when temp > temp set above (* F)
time.sleep(1)
except KeyboardInterrupt:
GPIO.cleanup()
print(" ...Quitting")
| dragonbeard/calvin | calvin.py | Python | gpl-3.0 | 1,854 |
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START aiplatform_create_batch_prediction_job_video_object_tracking_sample]
from google.cloud import aiplatform
from google.protobuf import json_format
from google.protobuf.struct_pb2 import Value
def create_batch_prediction_job_video_object_tracking_sample(
project: str,
display_name: str,
model_name: str,
gcs_source_uri: str,
gcs_destination_output_uri_prefix: str,
location: str = "us-central1",
api_endpoint: str = "us-central1-aiplatform.googleapis.com",
):
# The AI Platform services require regional API endpoints.
client_options = {"api_endpoint": api_endpoint}
# Initialize client that will be used to create and send requests.
# This client only needs to be created once, and can be reused for multiple requests.
client = aiplatform.gapic.JobServiceClient(client_options=client_options)
model_parameters_dict = {"confidenceThreshold": 0.0}
model_parameters = json_format.ParseDict(model_parameters_dict, Value())
batch_prediction_job = {
"display_name": display_name,
# Format: 'projects/{project}/locations/{location}/models/{model_id}'
"model": model_name,
"model_parameters": model_parameters,
"input_config": {
"instances_format": "jsonl",
"gcs_source": {"uris": [gcs_source_uri]},
},
"output_config": {
"predictions_format": "jsonl",
"gcs_destination": {"output_uri_prefix": gcs_destination_output_uri_prefix},
},
}
parent = f"projects/{project}/locations/{location}"
response = client.create_batch_prediction_job(
parent=parent, batch_prediction_job=batch_prediction_job
)
print("response:", response)
# [END aiplatform_create_batch_prediction_job_video_object_tracking_sample]
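# A minimal, hypothetical invocation of the sample above; the project,
# model resource name and GCS URIs below are placeholders, not values
# from the original sample:
#
# create_batch_prediction_job_video_object_tracking_sample(
#     project="my-project",
#     display_name="my-batch-prediction-job",
#     model_name="projects/my-project/locations/us-central1/models/1234567890",
#     gcs_source_uri="gs://my-bucket/input.jsonl",
#     gcs_destination_output_uri_prefix="gs://my-bucket/output/",
# )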
| googleapis/python-aiplatform | samples/snippets/job_service/create_batch_prediction_job_video_object_tracking_sample.py | Python | apache-2.0 | 2,379 |
# Copyright (C) 2020 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
import os
import psycopg2
from processing.main import (
get_latest_id,
connect,
PROCESSED_VIEW,)
LAST_PROCESSED = """
CREATE OR REPLACE VIEW {} AS
SELECT MAX(id) AS last_id FROM records WHERE processed = True
""".format(PROCESSED_VIEW)
class Conf():
db_name = os.environ['POSTGRES_DB']
db_host = os.environ['POSTGRES_HOSTNAME']
db_user = os.environ['POSTGRES_USER']
db_passwd = os.environ['POSTGRES_PASSWORD']
def check_processed_id(conn):
cursor = conn.cursor()
try:
get_latest_id(cursor)
except psycopg2.errors.UndefinedTable:
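        # The view is created lazily: if it does not exist yet, the query in
        # get_latest_id raises UndefinedTable and we create it here.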
cursor.execute(LAST_PROCESSED)
finally:
cursor.close()
def main():
try:
conn = connect(Conf())
check_processed_id(conn)
conn.close()
except psycopg2.OperationalError as op_error:
print("could not connect to server: Connection refused")
exit(1)
if __name__ == '__main__':
main()
# vi: ts=4 et sw=4 sts=4
| alexjch/telemetrics-backend | deployments/services/processing/entrypoint.py | Python | apache-2.0 | 1,043 |
# -*- coding: utf-8 -*-
import re
import os
from datetime import datetime
from lxml import etree, html
from django import db
from eduskunta.importer import Importer, ParseError
from parliament.models.session import *
from parliament.models.member import Member
from .member import fix_mp_name
VOTE_MAP = {
'Jaa': 'Y',
'Ei': 'N',
'Tyhjää': 'E',
'Poissa': 'A',
}
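# Normalized vote codes: Y = yes (Jaa), N = no (Ei), E = empty/abstain
# (Tyhjää), A = absent (Poissa). parse_vote() additionally returns 'S' for
# the speaker, whose entry is skipped when votes are saved.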
PROCESSING_STEP = {
'Ensimmäinen käsittely': '1H',
'Toinen käsittely': '2H',
'Ainoa käsittely': 'OH',
'Yksi käsittely': 'OH',
'Ulkopuolella päiväjärjestyksen': 'OA',
'Osittain ainoa, osittain toinen käsittely': '2H-OH',
'Toinen käsittely, ainoa käsittely': '2H-OH',
'Jatkettu ensimmäinen käsittely': '1H',
'Palautekeskustelu': '??',
'Lähetekeskustelu': '??',
'Vaalit': 'EL',
'Vaaleja': 'EL',
'Täysistuntokäsittely': 'PV',
'Ilmoituksia': '??',
'Kolmas käsittely': '??',
}
def parse_vote(mp, vote):
(name, party) = mp.split('/')
name = name.strip()
party = party.strip()
v = VOTE_MAP[vote]
if 'puhemiehe' in party:
v = 'S'
party = party.split()[0]
name = fix_mp_name(name)
return {'name': name, 'party': party, 'vote': v}
class VoteImporter(Importer):
VOTE_URL = '/triphome/bin/thw.cgi/trip/?${html}=aax/aax4000&${base}=aanestysu&aanestysvpvuosi=%d&istuntonro=%s&aanestysnro=%d'
VOTE_LIST_URL = '/triphome/bin/aax3000.sh?VAPAAHAKU=aanestysvpvuosi=%i'
BEGIN_YEAR = 1999
CACHE_DIR = 'votes'
def save_session(self, pv, info):
dt = datetime.strptime('%s %s' % (str(pv.plsess.date), info['time']),
'%Y-%m-%d %H.%M')
pv.time = dt
assert pv.time.date() == pv.plsess.date, 'Vote %s time mismatch (%s vs. %s)' % (pv, pv.time, pv.plsess)
pv.setting = info['setting']
pv.subject = info['subject']
pv.save()
Vote.objects.filter(session=pv).delete()
for vi in info['votes']:
mp = self.mp_by_name[vi['name']]
if vi['vote'] == 'S':
continue
vote = Vote(session=pv, vote=vi['vote'], member=mp, party=vi['party'])
vote.save()
pv.count_votes()
counts = [int(v) for v in pv.vote_counts.split(',')]
n = 0
for v in counts:
n += v
assert n in (197, 198, 199)
pv.save()
return pv
def import_session(self, info):
if not info['plsess'] in self.plsess_by_id:
try:
plsess = PlenarySession.objects.get(origin_id=info['plsess'])
except PlenarySession.DoesNotExist:
raise Exception("Vote %s refers to unexisting plenary session" % (info['number'], info['plsess']))
self.plsess_by_id[info['plsess']] = plsess
plsess = self.plsess_by_id[info['plsess']]
try:
pv = PlenaryVote.objects.get(plsess=plsess, number=info['number'])
if not self.replace:
return
except PlenaryVote.DoesNotExist:
pv = PlenaryVote(plsess=plsess, number=info['number'])
self.logger.info('processing plenary vote %s/%d' % (plsess.name, info['number']))
s = self.open_url(info['link'], self.CACHE_DIR)
doc = html.fromstring(s)
hdr_el = doc.xpath('//table[@class="voteResults"]')
if len(hdr_el) < 1:
raise ParseError('vote header not found')
hdr_el = hdr_el[0]
s = self.clean_text(hdr_el.xpath('caption')[0].text)
m = re.match(r'Äänestys (\d+) klo (\d{2}\.\d{2})', s, re.U)
info['time'] = m.groups()[1]
el = hdr_el.xpath('tbody/tr')[0].xpath('td')[1]
s = self.clean_text(el.text)
info['subject'] = s
el = hdr_el.xpath('tbody/tr/td/strong')[0]
s = self.clean_text(el.text)
step = PROCESSING_STEP[s]
el = doc.xpath("//th[contains(., 'nestysasettelu')]")[0]
s = self.clean_text(el.getnext().text)
info['setting'] = s
vote_list_el = doc.xpath('//table[@class="statistics"]/tbody/tr')
if len(vote_list_el) < 196/2 or len(vote_list_el) > 200/2:
raise ParseError('vote list not found')
votes = []
for row_el in vote_list_el:
td_list = row_el.xpath('td')
if len(td_list) != 5:
raise ParseError('invalid vote row')
votes.append(parse_vote(td_list[0].text, td_list[1].text))
if td_list[3].text:
votes.append(parse_vote(td_list[3].text, td_list[4].text))
info['votes'] = votes
pv.mark_modified()
pv.mark_checked()
self.updated += 1
return self.save_session(pv, info)
def _make_obj_lists(self):
if not hasattr(self, 'mp_by_name'):
mp_list = Member.objects.all()
mpd = {}
for mp in mp_list:
mpd[mp.name] = mp
self.mp_by_name = mpd
if not hasattr(self, 'plsess_by_id'):
plsess_list = PlenarySession.objects.all()
psd = {}
for pl in plsess_list:
psd[pl.origin_id] = pl
self.plsess_by_id = psd
def _import_one(self, vote_id):
(year, plsess, nr) = vote_id.split('/')
url = self.URL_BASE + self.VOTE_URL % (int(year), plsess, int(nr))
el_list, next_link = self.read_listing(self.CACHE_DIR, url)
if len(el_list) != 1:
raise ParseError("vote with id %s not found" % vote_id, url=url)
el = el_list[0]
vote_id_str = "%s/%s/%s" % (plsess, year, nr)
got_id = "%s/%d" % (el['plsess'], el['number'])
if vote_id_str != got_id:
raise ParseError("invalid vote returned (wanted %s, got %s)" % (vote_id_str, got_id), url=url)
info = {'plsess': el['plsess'], 'number': el['number']}
info['link'] = el['results_link']
try:
plv = self.import_session(info)
except ParseError as e:
e.url = url
raise
db.reset_queries()
return plv
def import_one(self, vote_id):
self._make_obj_lists()
self.updated = 0
try:
plv = self._import_one(vote_id)
except ParseError as e:
if e.url:
# nuke the cache if we have one
fname = self.http.get_fname(e.url, self.CACHE_DIR)
if fname:
os.unlink(fname)
self.logger.error("exception: %s" % e)
# retry
plv = self._import_one(vote_id)
return plv
def import_votes(self, **options):
self._make_obj_lists()
self.full_update = options.get('full')
self.updated = 0
if options.get('single', False):
self.replace = True
this_year = datetime.now().year
for year in range(this_year, self.BEGIN_YEAR-1, -1):
next_link = self.URL_BASE + self.VOTE_LIST_URL % year
year_votes = PlenaryVote.objects.filter(plsess__name__endswith=year)
seen_votes = []
while next_link:
updated_begin = self.updated
self.logger.debug("Fetching from %s" % next_link)
el_list, next_link = self.read_listing(self.CACHE_DIR, next_link)
for el in el_list:
if el['plsess'] == '85/1999':
# First plenary session in origin database
next_link = None
break
vote_id = '%s/%s' % (el['number'], el['plsess'])
seen_votes.append(vote_id)
info = {'plsess': el['plsess'], 'number': el['number']}
info['link'] = el['results_link']
if 'single' in options:
if options['single'] != vote_id:
continue
self.import_session(info)
db.reset_queries()
if options.get('single', None) == vote_id:
return
updated_this_round = self.updated - updated_begin
if not updated_this_round and not self.full_update and not options.get('single', None):
return
if not options.get('single', None):
for plvote in list(year_votes):
vote_id = '%d/%s' % (plvote.number, plvote.plsess.name)
if vote_id not in seen_votes:
print(("Vote %s not found anymore" % vote_id))
plvote.delete()
| kansanmuisti/kamu | Attic/eduskunta/vote.py | Python | agpl-3.0 | 8,603 |
from flask import render_template, redirect, request, url_for, flash
from . import auth
from ..models import User
from .forms import LoginForm, RegistrationForm
from flask_login import login_user, logout_user, login_required, current_user
from .. import db
from ..email import send_email
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
    flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data, username=form.username.data, password=form.password.data)
db.session.add(user)
flash('You can now login.')
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
| bobbyxuy/flask_web | app/auth/views.py | Python | mit | 1,344 |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared testing utilities."""
# Avoid the grpc and google.cloud.grpc collision.
from __future__ import absolute_import
class _Monkey(object):
# context-manager for replacing module names in the scope of a test.
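    # Illustrative use (names are hypothetical):
    #     with _Monkey(some_module, helper=fake_helper):
    #         ...  # code under test sees fake_helper; original restored on exit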
def __init__(self, module, **kw):
self.module = module
if len(kw) == 0: # pragma: NO COVER
raise ValueError('_Monkey was used with nothing to monkey-patch')
self.to_restore = {key: getattr(module, key) for key in kw}
for key, value in kw.items():
setattr(module, key, value)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for key, value in self.to_restore.items():
setattr(self.module, key, value)
class _NamedTemporaryFile(object):
def __init__(self, suffix=''):
import os
import tempfile
filehandle, self.name = tempfile.mkstemp(suffix=suffix)
os.close(filehandle)
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
import os
os.remove(self.name)
def _tempdir_maker():
import contextlib
import shutil
import tempfile
@contextlib.contextmanager
def _tempdir_mgr():
temp_dir = tempfile.mkdtemp()
yield temp_dir
shutil.rmtree(temp_dir)
return _tempdir_mgr
_tempdir = _tempdir_maker()
del _tempdir_maker
class _GAXBaseAPI(object):
_random_gax_error = False
def __init__(self, **kw):
self.__dict__.update(kw)
def _make_grpc_error(self, status_code, trailing=None):
from grpc._channel import _RPCState
from google.cloud.exceptions import GrpcRendezvous
details = 'Some error details.'
exc_state = _RPCState((), None, trailing, status_code, details)
return GrpcRendezvous(exc_state, None, None, None)
def _make_grpc_not_found(self):
from grpc import StatusCode
return self._make_grpc_error(StatusCode.NOT_FOUND)
def _make_grpc_failed_precondition(self):
from grpc import StatusCode
return self._make_grpc_error(StatusCode.FAILED_PRECONDITION)
def _make_grpc_deadline_exceeded(self):
from grpc import StatusCode
return self._make_grpc_error(StatusCode.DEADLINE_EXCEEDED)
class _GAXPageIterator(object):
def __init__(self, *pages, **kwargs):
self._pages = iter(pages)
self.page_token = kwargs.get('page_token')
def next(self):
import six
return six.next(self._pages)
__next__ = next
| axbaretto/beam | sdks/python/.tox/py27gcp/lib/python2.7/site-packages/google/cloud/_testing.py | Python | apache-2.0 | 3,140 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ==============================================================================
# author :Ghislain Vieilledent
# email :[email protected], [email protected]
# web :https://ecology.ghislainv.fr
# python_version :>=2.7
# license :GPLv3
# ==============================================================================
# Standard library imports
from __future__ import division, print_function # Python 3 compatibility
# Third party imports
import numpy as np
import pandas as pd
from patsy import dmatrices
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
# Local application imports
from ..model import model_binomial_iCAR
# AUC (see Liu 2011)
def computeAUC(pos_scores, neg_scores, n_sample=100000):
"""Compute the AUC index.
Compute the Area Under the ROC Curve (AUC). See `Liu et al. 2011
<https://doi.org/10.1111/j.1600-0587.2010.06354.x>`_\\ .
:param pos_scores: Scores of positive observations.
:param neg_scores: Scores of negative observations.
    :param n_sample: Number of samples used to approximate the AUC.
:return: AUC value.
"""
    pos_scores = np.array(pos_scores, dtype=float)
    neg_scores = np.array(neg_scores, dtype=float)
pos_sample = np.random.choice(pos_scores, size=n_sample, replace=True)
neg_sample = np.random.choice(neg_scores, size=n_sample, replace=True)
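    # Approximate AUC as the probability that a randomly drawn positive score
    # exceeds a randomly drawn negative score, counting ties as 0.5.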
AUC = np.mean(1.0*(pos_sample > neg_sample) + 0.5*(pos_sample == neg_sample))
return AUC
# accuracy_indices
def accuracy_indices(pred, obs):
"""Compute accuracy indices.
Compute the Overall Accuracy, the Figure of Merit, the
Specificity, the Sensitivity, the True Skill Statistics and the
Cohen's Kappa from a confusion matrix built on predictions
vs. observations.
:param pred: List of predictions.
:param obs: List of observations.
    :return: A dictionary of accuracy indices.
"""
# Create pandas data-frame
df = pd.DataFrame({"pred": pred, "obs": obs})
# Confusion matrix
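    # nXY counts rows with prediction X and observation Y, e.g. n10 =
    # predicted 1 but observed 0 (a false positive).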
n00 = sum((df["pred"] == 0) & (df["obs"] == 0))
n10 = sum((df["pred"] == 1) & (df["obs"] == 0))
n01 = sum((df["pred"] == 0) & (df["obs"] == 1))
n11 = sum((df["pred"] == 1) & (df["obs"] == 1))
# Accuracy indices
N = n11 + n10 + n00 + n01
OA = (n11 + n00) / N
FOM = n11 / (n11 + n10 + n01)
Sensitivity = n11 / (n11 + n01)
Specificity = n00 / (n00 + n10)
TSS = Sensitivity + Specificity - 1
Prob_1and1 = (n11 + n10) * (n11 + n01)
Prob_0and0 = (n00 + n01) * (n00 + n10)
Expected_accuracy = (Prob_1and1 + Prob_0and0) / (N * N)
Kappa = (OA - Expected_accuracy) / (1 - Expected_accuracy)
r = {"OA": OA, "EA": Expected_accuracy,
"FOM": FOM, "Sen": Sensitivity, "Spe": Specificity,
"TSS": TSS, "K": Kappa}
return r
# cross_validation
def cross_validation(data, formula, mod_type="icar", ratio=30,
nrep=5, seed=1234,
icar_args={"n_neighbors": None, "neighbors": None,
"burnin": 1000, "mcmc": 1000,
"thin": 1, "beta_start": 0},
rf_args={"n_estimators": 100, "n_jobs": None}):
"""Model cross-validation
Performs model cross-validation.
:param data: Full dataset.
:param formula: Model formula.
:param mod_type: Model type, can be either "icar", "glm", or "rf".
:param ratio: Percentage of data used for testing.
:param nrep: Number of repetitions for cross-validation.
:param seed: Seed for reproducibility.
    :param icar_args: Dictionary of arguments for the binomial iCAR model.
    :param rf_args: Dictionary of arguments for the random forest model.
:return: A Pandas data frame with cross-validation results.
"""
# Set random seed for reproducibility
np.random.seed(seed)
# Result table
CV_df = pd.DataFrame({"index": ["AUC", "OA", "EA", "FOM", "Sen",
"Spe", "TSS", "K"]})
# Constants
nobs = data.shape[0]
nobs_test = int(round(nobs * (ratio / 100)))
rows = np.arange(nobs)
# Loop on repetitions
for i in range(nrep):
# Print message
print("Repetition #: " + str(i+1))
# Data-sets for cross-validation
rows_test = np.random.choice(rows, size=nobs_test, replace=False)
rows_train = np.where(np.isin(rows, rows_test, invert=True))
data_test = data.iloc[rows_test].copy()
data_train = data.iloc[rows_train].copy()
# True threshold in data_test (might be slightly different from 0.5)
# nfor_test = sum(data_test.fcc23 == 1)
ndefor_test = sum(data_test.fcc23 == 0)
thresh_test = 1 - (ndefor_test / nobs_test)
# Training matrices
y, x = dmatrices(formula, data=data_train, NA_action="drop")
Y_train = y[:, 0]
X_train = x[:, :-1] # We remove the last column (cells)
# Test matrices
y, x = dmatrices(formula, data=data_test, NA_action="drop")
# Y_test = y[:, 0]
X_test = x[:, :-1] # We remove the last column (cells)
# Compute deforestation probability
# icar
if (mod_type == "icar"):
# Training the model
mod_icar = model_binomial_iCAR(
# Observations
suitability_formula=formula, data=data_train,
# Spatial structure
n_neighbors=icar_args["n_neighbors"],
neighbors=icar_args["neighbors"],
# Chains
burnin=icar_args["burnin"], mcmc=icar_args["mcmc"],
thin=icar_args["thin"],
# Starting values
beta_start=icar_args["beta_start"])
# Predictions for the test dataset
data_test["theta_pred"] = mod_icar.predict(new_data=data_test)
# glm
if (mod_type == "glm"):
# Training the model
glm = LogisticRegression(solver="lbfgs")
mod_glm = glm.fit(X_train, Y_train)
# Predictions for the test dataset
data_test["theta_pred"] = mod_glm.predict_proba(X_test)[:, 1]
# RF
if (mod_type == "rf"):
# Training the model
rf = RandomForestClassifier(n_estimators=rf_args["n_estimators"],
n_jobs=rf_args["n_jobs"])
mod_rf = rf.fit(X_train, Y_train)
# Predictions for the test dataset
data_test["theta_pred"] = mod_rf.predict_proba(X_test)[:, 1]
# Transform probabilities into binary data
proba_thresh = np.quantile(data_test["theta_pred"], thresh_test)
data_test["pred"] = 0
data_test.loc[data_test.theta_pred > proba_thresh, "pred"] = 1
# AUC
pos_scores = data_test.theta_pred[data_test.fcc23 == 0]
neg_scores = data_test.theta_pred[data_test.fcc23 == 1]
AUC = computeAUC(pos_scores, neg_scores)
# Accuracy indices
obs = 1 - data_test.fcc23
pred = data_test.pred
        ai = accuracy_indices(pred, obs)
        # Tuple of indices
acc_ind = (AUC, ai["OA"], ai["EA"], ai["FOM"], ai["Sen"],
ai["Spe"], ai["TSS"], ai["K"])
# Results as data frame
CV_df["rep" + str(i+1)] = acc_ind
# Mean over repetitions
CV_values = CV_df.loc[:, CV_df.columns != "index"]
CV_df["mean"] = np.mean(CV_values, axis=1)
CV_df = CV_df.round(4)
return CV_df
# End
| ghislainv/deforestprob | forestatrisk/validate/model_validation.py | Python | gpl-3.0 | 7,618 |
__author__ = 'jitrixis'
from TCP import * | Jitrixis/2ARC-Network-stack | TVpy/Layers/Segment/all.py | Python | mit | 42 |
"""IPython terminal interface using prompt_toolkit"""
from __future__ import print_function
import os
import sys
import warnings
from warnings import warn
from IPython.core.interactiveshell import InteractiveShell, InteractiveShellABC
from IPython.utils import io
from IPython.utils.py3compat import PY3, cast_unicode_py2, input, string_types
from IPython.utils.terminal import toggle_set_term_title, set_term_title
from IPython.utils.process import abbrev_cwd
from traitlets import Bool, Unicode, Dict, Integer, observe, Instance, Type, default, Enum, Union
from prompt_toolkit.document import Document
from prompt_toolkit.enums import DEFAULT_BUFFER, EditingMode
from prompt_toolkit.filters import (HasFocus, Condition, IsDone)
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.shortcuts import create_prompt_application, create_eventloop, create_prompt_layout, create_output
from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.layout.processors import ConditionalProcessor, HighlightMatchingBracketProcessor
from prompt_toolkit.styles import PygmentsStyle, DynamicStyle
from pygments.styles import get_style_by_name
from pygments.style import Style
from pygments.token import Token
from .debugger import TerminalPdb, Pdb
from .magics import TerminalMagics
from .pt_inputhooks import get_inputhook_name_and_func
from .prompts import Prompts, ClassicPrompts, RichPromptDisplayHook
from .ptutils import IPythonPTCompleter, IPythonPTLexer
from .shortcuts import register_ipython_shortcuts
DISPLAY_BANNER_DEPRECATED = object()
from pygments.style import Style
class _NoStyle(Style): pass
_style_overrides_light_bg = {
Token.Prompt: '#0000ff',
Token.PromptNum: '#0000ee bold',
Token.OutPrompt: '#cc0000',
Token.OutPromptNum: '#bb0000 bold',
}
_style_overrides_linux = {
Token.Prompt: '#00cc00',
Token.PromptNum: '#00bb00 bold',
Token.OutPrompt: '#cc0000',
Token.OutPromptNum: '#bb0000 bold',
}
def get_default_editor():
try:
ed = os.environ['EDITOR']
if not PY3:
ed = ed.decode()
return ed
except KeyError:
pass
except UnicodeError:
warn("$EDITOR environment variable is not pure ASCII. Using platform "
"default editor.")
if os.name == 'posix':
return 'vi' # the only one guaranteed to be there!
else:
return 'notepad' # same in Windows!
# conservatively check for tty
# overridden streams can result in things like:
# - sys.stdin = None
# - no isatty method
for _name in ('stdin', 'stdout', 'stderr'):
_stream = getattr(sys, _name)
if not _stream or not hasattr(_stream, 'isatty') or not _stream.isatty():
_is_tty = False
break
else:
_is_tty = True
_use_simple_prompt = ('IPY_TEST_SIMPLE_PROMPT' in os.environ) or (not _is_tty)
class TerminalInteractiveShell(InteractiveShell):
    space_for_menu = Integer(6, help='Number of lines at the bottom of the screen '
'to reserve for the completion menu'
).tag(config=True)
def _space_for_menu_changed(self, old, new):
self._update_layout()
pt_cli = None
debugger_history = None
_pt_app = None
simple_prompt = Bool(_use_simple_prompt,
help="""Use `raw_input` for the REPL, without completion and prompt colors.
        Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR. Known usages are:
        IPython's own testing machinery, and emacs inferior-shell integration through elpy.
        This mode defaults to `True` if the `IPY_TEST_SIMPLE_PROMPT`
environment variable is set, or the current terminal is not a tty.
"""
).tag(config=True)
@property
def debugger_cls(self):
return Pdb if self.simple_prompt else TerminalPdb
confirm_exit = Bool(True,
help="""
Set to confirm when you try to exit IPython with an EOF (Control-D
in Unix, Control-Z/Enter in Windows). By typing 'exit' or 'quit',
you can force a direct exit without any confirmation.""",
).tag(config=True)
editing_mode = Unicode('emacs',
help="Shortcut style to use at the prompt. 'vi' or 'emacs'.",
).tag(config=True)
mouse_support = Bool(False,
help="Enable mouse support in the prompt"
).tag(config=True)
# We don't load the list of styles for the help string, because loading
# Pygments plugins takes time and can cause unexpected errors.
highlighting_style = Union([Unicode('legacy'), Type(klass=Style)],
help="""The name or class of a Pygments style to use for syntax
highlighting. To see available styles, run `pygmentize -L styles`."""
).tag(config=True)
@observe('highlighting_style')
@observe('colors')
def _highlighting_style_changed(self, change):
self.refresh_style()
def refresh_style(self):
self._style = self._make_style_from_name_or_cls(self.highlighting_style)
highlighting_style_overrides = Dict(
help="Override highlighting format for specific tokens"
).tag(config=True)
true_color = Bool(False,
help=("Use 24bit colors instead of 256 colors in prompt highlighting. "
"If your terminal supports true color, the following command "
"should print 'TRUECOLOR' in orange: "
"printf \"\\x1b[38;2;255;100;0mTRUECOLOR\\x1b[0m\\n\"")
).tag(config=True)
editor = Unicode(get_default_editor(),
help="Set the editor used by IPython (default to $EDITOR/vi/notepad)."
).tag(config=True)
prompts_class = Type(Prompts, help='Class used to generate Prompt token for prompt_toolkit').tag(config=True)
prompts = Instance(Prompts)
@default('prompts')
def _prompts_default(self):
return self.prompts_class(self)
@observe('prompts')
def _(self, change):
self._update_layout()
@default('displayhook_class')
def _displayhook_class_default(self):
return RichPromptDisplayHook
term_title = Bool(True,
help="Automatically set the terminal title"
).tag(config=True)
display_completions = Enum(('column', 'multicolumn','readlinelike'),
help= ( "Options for displaying tab completions, 'column', 'multicolumn', and "
"'readlinelike'. These options are for `prompt_toolkit`, see "
"`prompt_toolkit` documentation for more information."
),
default_value='multicolumn').tag(config=True)
highlight_matching_brackets = Bool(True,
help="Highlight matching brackets.",
).tag(config=True)
extra_open_editor_shortcuts = Bool(False,
help="Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. "
"This is in addition to the F2 binding, which is always enabled."
).tag(config=True)
@observe('term_title')
def init_term_title(self, change=None):
# Enable or disable the terminal title.
if self.term_title:
toggle_set_term_title(True)
set_term_title('IPython: ' + abbrev_cwd())
else:
toggle_set_term_title(False)
def init_display_formatter(self):
super(TerminalInteractiveShell, self).init_display_formatter()
# terminal only supports plain text
self.display_formatter.active_types = ['text/plain']
# disable `_ipython_display_`
self.display_formatter.ipython_display_formatter.enabled = False
def init_prompt_toolkit_cli(self):
if self.simple_prompt:
# Fall back to plain non-interactive output for tests.
# This is very limited, and only accepts a single line.
def prompt():
isp = self.input_splitter
prompt_text = "".join(x[1] for x in self.prompts.in_prompt_tokens())
prompt_continuation = "".join(x[1] for x in self.prompts.continuation_prompt_tokens())
while isp.push_accepts_more():
line = cast_unicode_py2(input(prompt_text))
isp.push(line)
prompt_text = prompt_continuation
return isp.source_reset()
self.prompt_for_code = prompt
return
# Set up keyboard shortcuts
kbmanager = KeyBindingManager.for_prompt(
enable_open_in_editor=self.extra_open_editor_shortcuts,
)
register_ipython_shortcuts(kbmanager.registry, self)
# Pre-populate history from IPython's history database
history = InMemoryHistory()
last_cell = u""
for __, ___, cell in self.history_manager.get_tail(self.history_load_length,
include_latest=True):
# Ignore blank lines and consecutive duplicates
cell = cell.rstrip()
if cell and (cell != last_cell):
history.append(cell)
last_cell = cell
self._style = self._make_style_from_name_or_cls(self.highlighting_style)
self.style = DynamicStyle(lambda: self._style)
editing_mode = getattr(EditingMode, self.editing_mode.upper())
def patch_stdout(**kwargs):
return self.pt_cli.patch_stdout_context(**kwargs)
self._pt_app = create_prompt_application(
editing_mode=editing_mode,
key_bindings_registry=kbmanager.registry,
history=history,
completer=IPythonPTCompleter(shell=self,
patch_stdout=patch_stdout),
enable_history_search=True,
style=self.style,
mouse_support=self.mouse_support,
**self._layout_options()
)
self._eventloop = create_eventloop(self.inputhook)
self.pt_cli = CommandLineInterface(
self._pt_app, eventloop=self._eventloop,
output=create_output(true_color=self.true_color))
def _make_style_from_name_or_cls(self, name_or_cls):
"""
Small wrapper that make an IPython compatible style from a style name
We need that to add style for prompt ... etc.
"""
style_overrides = {}
if name_or_cls == 'legacy':
legacy = self.colors.lower()
if legacy == 'linux':
style_cls = get_style_by_name('monokai')
style_overrides = _style_overrides_linux
elif legacy == 'lightbg':
style_overrides = _style_overrides_light_bg
style_cls = get_style_by_name('pastie')
elif legacy == 'neutral':
# The default theme needs to be visible on both a dark background
# and a light background, because we can't tell what the terminal
# looks like. These tweaks to the default theme help with that.
style_cls = get_style_by_name('default')
style_overrides.update({
Token.Number: '#007700',
Token.Operator: 'noinherit',
Token.String: '#BB6622',
Token.Name.Function: '#2080D0',
Token.Name.Class: 'bold #2080D0',
Token.Name.Namespace: 'bold #2080D0',
Token.Prompt: '#009900',
Token.PromptNum: '#00ff00 bold',
Token.OutPrompt: '#990000',
Token.OutPromptNum: '#ff0000 bold',
})
# Hack: Due to limited color support on the Windows console
# the prompt colors will be wrong without this
if os.name == 'nt':
style_overrides.update({
Token.Prompt: '#ansidarkgreen',
Token.PromptNum: '#ansigreen bold',
Token.OutPrompt: '#ansidarkred',
Token.OutPromptNum: '#ansired bold',
})
elif legacy =='nocolor':
style_cls=_NoStyle
style_overrides = {}
else :
raise ValueError('Got unknown colors: ', legacy)
else :
if isinstance(name_or_cls, string_types):
style_cls = get_style_by_name(name_or_cls)
else:
style_cls = name_or_cls
style_overrides = {
Token.Prompt: '#009900',
Token.PromptNum: '#00ff00 bold',
Token.OutPrompt: '#990000',
Token.OutPromptNum: '#ff0000 bold',
}
style_overrides.update(self.highlighting_style_overrides)
style = PygmentsStyle.from_defaults(pygments_style_cls=style_cls,
style_dict=style_overrides)
return style
def _layout_options(self):
"""
Return the current layout option for the current Terminal InteractiveShell
"""
return {
'lexer':IPythonPTLexer(),
'reserve_space_for_menu':self.space_for_menu,
'get_prompt_tokens':self.prompts.in_prompt_tokens,
'get_continuation_tokens':self.prompts.continuation_prompt_tokens,
'multiline':True,
'display_completions_in_columns': (self.display_completions == 'multicolumn'),
# Highlight matching brackets, but only when this setting is
# enabled, and only when the DEFAULT_BUFFER has the focus.
'extra_input_processors': [ConditionalProcessor(
processor=HighlightMatchingBracketProcessor(chars='[](){}'),
filter=HasFocus(DEFAULT_BUFFER) & ~IsDone() &
Condition(lambda cli: self.highlight_matching_brackets))],
}
def _update_layout(self):
"""
Ask for a re computation of the application layout, if for example ,
some configuration options have changed.
"""
if self._pt_app:
self._pt_app.layout = create_prompt_layout(**self._layout_options())
def prompt_for_code(self):
document = self.pt_cli.run(
pre_run=self.pre_prompt, reset_current_buffer=True)
return document.text
def enable_win_unicode_console(self):
if sys.version_info >= (3, 6):
# Since PEP 528, Python uses the unicode APIs for the Windows
# console by default, so WUC shouldn't be needed.
return
import win_unicode_console
if PY3:
win_unicode_console.enable()
else:
# https://github.com/ipython/ipython/issues/9768
from win_unicode_console.streams import (TextStreamWrapper,
stdout_text_transcoded, stderr_text_transcoded)
class LenientStrStreamWrapper(TextStreamWrapper):
def write(self, s):
if isinstance(s, bytes):
s = s.decode(self.encoding, 'replace')
self.base.write(s)
stdout_text_str = LenientStrStreamWrapper(stdout_text_transcoded)
stderr_text_str = LenientStrStreamWrapper(stderr_text_transcoded)
win_unicode_console.enable(stdout=stdout_text_str,
stderr=stderr_text_str)
def init_io(self):
if sys.platform not in {'win32', 'cli'}:
return
self.enable_win_unicode_console()
import colorama
colorama.init()
# For some reason we make these wrappers around stdout/stderr.
# For now, we need to reset them so all output gets coloured.
# https://github.com/ipython/ipython/issues/8669
# io.std* are deprecated, but don't show our own deprecation warnings
# during initialization of the deprecated API.
with warnings.catch_warnings():
warnings.simplefilter('ignore', DeprecationWarning)
io.stdout = io.IOStream(sys.stdout)
io.stderr = io.IOStream(sys.stderr)
def init_magics(self):
super(TerminalInteractiveShell, self).init_magics()
self.register_magics(TerminalMagics)
def init_alias(self):
# The parent class defines aliases that can be safely used with any
# frontend.
super(TerminalInteractiveShell, self).init_alias()
# Now define aliases that only make sense on the terminal, because they
# need direct access to the console in a way that we can't emulate in
# GUI or web frontend
if os.name == 'posix':
for cmd in ['clear', 'more', 'less', 'man']:
self.alias_manager.soft_define_alias(cmd, cmd)
def __init__(self, *args, **kwargs):
super(TerminalInteractiveShell, self).__init__(*args, **kwargs)
self.init_prompt_toolkit_cli()
self.init_term_title()
self.keep_running = True
self.debugger_history = InMemoryHistory()
def ask_exit(self):
self.keep_running = False
rl_next_input = None
def pre_prompt(self):
if self.rl_next_input:
# We can't set the buffer here, because it will be reset just after
# this. Adding a callable to pre_run_callables does what we need
# after the buffer is reset.
s = cast_unicode_py2(self.rl_next_input)
def set_doc():
self.pt_cli.application.buffer.document = Document(s)
if hasattr(self.pt_cli, 'pre_run_callables'):
self.pt_cli.pre_run_callables.append(set_doc)
else:
# Older version of prompt_toolkit; it's OK to set the document
# directly here.
set_doc()
self.rl_next_input = None
def interact(self, display_banner=DISPLAY_BANNER_DEPRECATED):
if display_banner is not DISPLAY_BANNER_DEPRECATED:
warn('interact `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)
self.keep_running = True
while self.keep_running:
print(self.separate_in, end='')
try:
code = self.prompt_for_code()
except EOFError:
if (not self.confirm_exit) \
or self.ask_yes_no('Do you really want to exit ([y]/n)?','y','n'):
self.ask_exit()
else:
if code:
self.run_cell(code, store_history=True)
def mainloop(self, display_banner=DISPLAY_BANNER_DEPRECATED):
# An extra layer of protection in case someone mashing Ctrl-C breaks
# out of our internal code.
if display_banner is not DISPLAY_BANNER_DEPRECATED:
warn('mainloop `display_banner` argument is deprecated since IPython 5.0. Call `show_banner()` if needed.', DeprecationWarning, stacklevel=2)
while True:
try:
self.interact()
break
except KeyboardInterrupt as e:
print("\n%s escaped interact()\n" % type(e).__name__)
finally:
# An interrupt during the eventloop will mess up the
# internal state of the prompt_toolkit library.
# Stopping the eventloop fixes this, see
# https://github.com/ipython/ipython/pull/9867
if hasattr(self, '_eventloop'):
self._eventloop.stop()
_inputhook = None
def inputhook(self, context):
if self._inputhook is not None:
self._inputhook(context)
active_eventloop = None
def enable_gui(self, gui=None):
if gui:
self.active_eventloop, self._inputhook =\
get_inputhook_name_and_func(gui)
else:
self.active_eventloop = self._inputhook = None
# Run !system commands directly, not through pipes, so terminal programs
# work correctly.
system = InteractiveShell.system_raw
def auto_rewrite_input(self, cmd):
"""Overridden from the parent class to use fancy rewriting prompt"""
if not self.show_rewritten_input:
return
tokens = self.prompts.rewrite_prompt_tokens()
if self.pt_cli:
self.pt_cli.print_tokens(tokens)
print(cmd)
else:
prompt = ''.join(s for t, s in tokens)
print(prompt, cmd, sep='')
_prompts_before = None
def switch_doctest_mode(self, mode):
"""Switch prompts to classic for %doctest_mode"""
if mode:
self._prompts_before = self.prompts
self.prompts = ClassicPrompts(self)
elif self._prompts_before:
self.prompts = self._prompts_before
self._prompts_before = None
self._update_layout()
InteractiveShellABC.register(TerminalInteractiveShell)
if __name__ == '__main__':
TerminalInteractiveShell.instance().interact()
| pacoqueen/ginn | extra/install/ipython2/ipython-5.10.0/IPython/terminal/interactiveshell.py | Python | gpl-2.0 | 21,375 |
'''
NetNS
=====
A NetNS object is IPRoute-like. It runs in the main network
namespace, but also creates a proxy process running in
the required netns. All the netlink requests are done via
that proxy process.
NetNS supports standard IPRoute API, so can be used instead
of IPRoute, e.g., in IPDB::
# start the main network settings database:
ipdb_main = IPDB()
# start the same for a netns:
ipdb_test = IPDB(nl=NetNS('test'))
# create VETH
ipdb_main.create(ifname='v0p0', kind='veth', peer='v0p1').commit()
# move peer VETH into the netns
with ipdb_main.interfaces.v0p1 as veth:
veth.net_ns_fd = 'test'
# please keep in mind, that netns move clears all the settings
# on a VETH interface pair, so one should run netns assignment
# as a separate operation only
# assign addresses
# please notice, that `v0p1` is already in the `test` netns,
# so should be accessed via `ipdb_test`
with ipdb_main.interfaces.v0p0 as veth:
veth.add_ip('172.16.200.1/24')
veth.up()
with ipdb_test.interfaces.v0p1 as veth:
veth.add_ip('172.16.200.2/24')
veth.up()
Please review also the test code, under `tests/test_netns.py` for
more examples.
By default, NetNS creates requested netns, if it doesn't exist,
or uses existing one. To control this behaviour, one can use flags
as for `open(2)` system call::
# create a new netns or fail, if it already exists
    netns = NetNS('test', flags=os.O_CREAT | os.O_EXCL)
# create a new netns or use existing one
netns = NetNS('test', flags=os.O_CREAT)
# the same as above, the default behaviour
netns = NetNS('test')
To remove a network namespace::
from pyroute2 import NetNS
netns = NetNS('test')
netns.close()
netns.remove()
One should stop it first with `close()`, and only after that
run `remove()`.
'''
import os
import errno
import atexit
import select
import signal
import struct
import threading
import traceback
from socket import SOL_SOCKET
from socket import SO_RCVBUF
from pyroute2.config import MpPipe
from pyroute2.config import MpProcess
from pyroute2.iproute import IPRoute
from pyroute2.netlink.nlsocket import NetlinkMixin
from pyroute2.netlink.rtnl.iprsocket import MarshalRtnl
from pyroute2.iproute import IPRouteMixin
from pyroute2.netns import setns
from pyroute2.netns import remove
def NetNServer(netns, rcvch, cmdch, flags=os.O_CREAT):
'''
    The netns server is supposed to be started automatically by NetNS.
It has two communication channels: one simplex to forward incoming
netlink packets, `rcvch`, and other synchronous duplex to get
commands and send back responses, `cmdch`.
Channels should support standard socket API, should be compatible
with poll/select and should be able to transparently pickle objects.
NetNS uses `multiprocessing.Pipe` for this purpose, but it can be
any other implementation with compatible API.
The first parameter, `netns`, is a netns name. Depending on the
`flags`, the netns can be created automatically. The `flags` semantics
is exactly the same as for `open(2)` system call.
...
The server workflow is simple. The startup sequence::
1. Create or open a netns.
2. Start `IPRoute` instance. It will be used only on the low level,
the `IPRoute` will not parse any packet.
3. Start poll/select loop on `cmdch` and `IPRoute`.
On the startup, the server sends via `cmdch` the status packet. It can be
`None` if all is OK, or some exception.
Further data handling, depending on the channel, server side::
1. `IPRoute`: read an incoming netlink packet and send it unmodified
to the peer via `rcvch`. The peer, polling `rcvch`, can handle
the packet on its side.
2. `cmdch`: read tuple (cmd, argv, kwarg). If the `cmd` starts with
"send", then take `argv[0]` as a packet buffer, treat it as one
netlink packet and substitute PID field (offset 12, uint32) with
its own. Strictly speaking, it is not mandatory for modern netlink
implementations, but it is required by the protocol standard.
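    For example, a client-side `sendto()` call is delivered to the server on
    `cmdch` roughly as the tuple below (an illustrative sketch; the actual
    payload is a raw netlink packet)::
        ('sendto', (b'...packet bytes...', (0, 0)), {})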
'''
signal.signal(signal.SIGINT, signal.SIG_IGN)
try:
nsfd = setns(netns, flags)
except OSError as e:
cmdch.send(e)
return e.errno
except Exception as e:
cmdch.send(OSError(errno.ECOMM, str(e), netns))
return 255
#
try:
ipr = IPRoute()
rcvch_lock = ipr._sproxy.lock
ipr._s_channel = rcvch
poll = select.poll()
poll.register(ipr, select.POLLIN | select.POLLPRI)
poll.register(cmdch, select.POLLIN | select.POLLPRI)
except Exception as e:
cmdch.send(e)
return 255
# all is OK so far
cmdch.send(None)
# 8<-------------------------------------------------------------
while True:
events = poll.poll()
for (fd, event) in events:
if fd == ipr.fileno():
bufsize = ipr.getsockopt(SOL_SOCKET, SO_RCVBUF) // 2
with rcvch_lock:
rcvch.send(ipr.recv(bufsize))
elif fd == cmdch.fileno():
try:
cmdline = cmdch.recv()
if cmdline is None:
poll.unregister(ipr)
poll.unregister(cmdch)
ipr.close()
os.close(nsfd)
return
(cmd, argv, kwarg) = cmdline
if cmd[:4] == 'send':
# Achtung
#
# It's a hack, but we just have to do it: one
# must use actual pid in netlink messages
#
# FIXME: there can be several messages in one
# call buffer; but right now we can ignore it
msg = argv[0][:12]
msg += struct.pack("I", os.getpid())
msg += argv[0][16:]
argv = list(argv)
argv[0] = msg
cmdch.send(getattr(ipr, cmd)(*argv, **kwarg))
except Exception as e:
e.tb = traceback.format_exc()
cmdch.send(e)
class NetNSProxy(object):
netns = 'default'
flags = os.O_CREAT
def __init__(self, *argv, **kwarg):
self.cmdlock = threading.Lock()
self.rcvch, rcvch = MpPipe()
self.cmdch, cmdch = MpPipe()
self.server = MpProcess(target=NetNServer,
args=(self.netns, rcvch, cmdch, self.flags))
self.server.start()
error = self.cmdch.recv()
if error is not None:
self.server.join()
raise error
else:
atexit.register(self.close)
def recv(self, bufsize, flags=0):
return self.rcvch.recv()
def close(self):
self.cmdch.send(None)
self.server.join()
def proxy(self, cmd, *argv, **kwarg):
with self.cmdlock:
self.cmdch.send((cmd, argv, kwarg))
response = self.cmdch.recv()
if isinstance(response, Exception):
raise response
return response
def fileno(self):
return self.rcvch.fileno()
def bind(self, *argv, **kwarg):
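        # NB: `async` bind is forced off below; incoming packets are already
        # forwarded through `rcvch` by the server process, so a client-side
        # buffer thread is presumably unnecessary here.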
if 'async' in kwarg:
kwarg['async'] = False
return self.proxy('bind', *argv, **kwarg)
def send(self, *argv, **kwarg):
return self.proxy('send', *argv, **kwarg)
def sendto(self, *argv, **kwarg):
return self.proxy('sendto', *argv, **kwarg)
def getsockopt(self, *argv, **kwarg):
return self.proxy('getsockopt', *argv, **kwarg)
def setsockopt(self, *argv, **kwarg):
return self.proxy('setsockopt', *argv, **kwarg)
class NetNSocket(NetlinkMixin, NetNSProxy):
def bind(self, *argv, **kwarg):
return NetNSProxy.bind(self, *argv, **kwarg)
def close(self):
NetNSProxy.close(self)
def _sendto(self, *argv, **kwarg):
return NetNSProxy.sendto(self, *argv, **kwarg)
def _recv(self, *argv, **kwarg):
return NetNSProxy.recv(self, *argv, **kwarg)
class NetNS(IPRouteMixin, NetNSocket):
'''
NetNS is the IPRoute API with network namespace support.
**Why not IPRoute?**
    The task of running netlink commands in one network namespace while
    being in another requires an architecture that differs too much from
    a simple netlink socket.
NetNS starts a proxy process in a network namespace and uses
`multiprocessing` communication channels between the main and the proxy
processes to route all `recv()` and `sendto()` requests/responses.
**Any specific API calls?**
    Nope. `NetNS` supports all the same calls that `IPRoute` does, in the same
    way. It provides a full `socket`-compatible API and can be used in
poll/select as well.
The only difference is the `close()` call. In the case of `NetNS` it
is **mandatory** to close the socket before exit.
**NetNS and IPDB**
It is possible to run IPDB with NetNS::
from pyroute2 import NetNS
from pyroute2 import IPDB
ip = IPDB(nl=NetNS('somenetns'))
...
ip.release()
Do not forget to call `release()` when the work is done. It will shut
    down the `NetNS` instance as well.
'''
def __init__(self, netns, flags=os.O_CREAT):
self.netns = netns
self.flags = flags
super(NetNS, self).__init__()
self.marshal = MarshalRtnl()
def post_init(self):
pass
def remove(self):
'''
Try to remove this network namespace from the system.
'''
remove(self.netns)
| little-dude/pyroute2 | pyroute2/netns/nslink.py | Python | apache-2.0 | 9,894 |
import random
import pytz
from users.tests.factories import UserFactory
from ksg_nett import settings
from factory import Faker, SubFactory
from factory.django import DjangoModelFactory
from factory import post_generation
from quotes.models import Quote, QuoteVote
class QuoteFactory(DjangoModelFactory):
class Meta:
model = Quote
text = Faker('text')
verified_by = SubFactory(UserFactory)
reported_by = SubFactory(UserFactory)
@post_generation
def tagged(self, create, extracted, **kwargs):
if not create:
# Simple build, do nothing.
return
if extracted:
            # A list of users was passed in, use them
for user in extracted:
self.tagged.add(user)
else:
self.tagged.set(UserFactory.create_batch(2))
# created_at = Faker('past_datetime', tzinfo=pytz.timezone(settings.TIME_ZONE))
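# Usage sketch (illustrative): QuoteFactory(tagged=[user_a, user_b]) tags the
# given users via the post_generation hook above; without an argument, two
# users are created and tagged automatically.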
class QuoteVoteFactory(DjangoModelFactory):
class Meta:
model = QuoteVote
quote = SubFactory(QuoteFactory)
value = random.choice([-1, 1])
caster = SubFactory(UserFactory)
| KSG-IT/ksg-nett | quotes/tests/factories.py | Python | gpl-3.0 | 1,122 |
"""
Django settings for bheemboy project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6gx3w!%!akim)0i09vhq+77a6ot8%5afh^i33zp_y69f+6tx=)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Application definition
INSTALLED_APPS = [
'coursereg.apps.CourseregConfig',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles'
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'bheemboy.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'bheemboy.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static/')
AUTH_USER_MODEL = 'coursereg.User'
LOGIN_URL = '/signin'
DEFAULT_FROM_EMAIL = 'admin@localhost'
EMAIL_HOST = 'localhost'
EMAIL_TIMEOUT = 3
CAN_FACULTY_CREATE_COURSES = False
CAN_ADVISER_ADD_COURSES_FOR_STUDENTS = False
CONTACT_EMAIL = 'support@localhost'
MANUAL_FACULTY_REVIEW = True
WARN_REVIEW_BEFORE_LAST_DATE = False
## Logging
LOGGING = {
'version': 1,
'disable_existing_loggers': True,
'formatters': {
'standard': {
'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt' : "%d/%b/%Y %H:%M:%S"
},
},
'handlers': {
'null': {
'level':'DEBUG',
'class':'logging.NullHandler',
},
'console':{
'level':'INFO',
'class':'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django': {
'handlers':['console'],
'propagate': True,
'level':'WARN',
},
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'coursereg': {
'handlers': ['console'],
            'propagate': True,
'level': 'WARN',
},
}
}
| s-gv/bheemboy | bheemboy/settings.py | Python | mit | 4,587 |
# -*- coding: utf-8 -*-
#
# deepnlp documentation build configuration file, created by
# sphinx-quickstart on Tue Nov 1 18:26:22 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'deepnlp'
copyright = u'2017, xichen ding'
author = u'xichen ding'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1.6'
# The full version, including alpha/beta/rc tags.
release = u'0.1.6'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'alabaster'
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#html_title = u'deepnlp v0.1.6'
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'deepnlpdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'deepnlp.tex', u'deepnlp Documentation',
u'xichen ding', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'deepnlp', u'deepnlp Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'deepnlp', u'deepnlp Documentation',
author, 'deepnlp', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| rockingdingo/deepnlp | docs/conf.py | Python | mit | 9,634 |
from sqlalchemy import create_engine, and_
from datetime import datetime
import numpy as np
from models import (ActionMixin, UserMixin, ItemMixin, ComputationMixin,
COMPUTATION_SK_NAME,
ACTION_UPVOTE, ACTION_DOWNVOTE,
ACTION_FLAG_SPAM, ACTION_FLAG_HAM)
import spam_utils as su
import spam_detection_karger as sdk
import spam_detection_dirichlet as sdd
import hitsDB
SPAM_ALGO = su.ALGO_DIRICHLET
def bind_engine(engine, session, base, should_create=True):
session.configure(bind=engine)
base.metadata.bind = engine
if should_create:
base.metadata.create_all(engine)
def bootstrap(base, create_all=False):
""" Engine should be binded before calling this function."""
class Computation(ComputationMixin, base):
pass
class ModerationAction(ActionMixin, base):
pass
class ModeratedAnnotation(ItemMixin, base):
pass
class ModerationUser(UserMixin, base):
pass
ActionMixin.cls = ModerationAction
ItemMixin.cls = ModeratedAnnotation
ComputationMixin.cls = Computation
UserMixin.cls = ModerationUser
if create_all:
base.metadata.create_all(base.metadata.bind)
def run_offline_spam_detection(algo_name, session):
""" Method runs offline spam detection. """
    # Obtains class names to perform db queries later.
if algo_name == su.ALGO_KARGER:
sdk.run_offline_computations(session)
else:
sdd.run_offline_computations(session)
pass
session.flush()
def raise_spam_flag(item, user, session, algo_name=su.ALGO_DIRICHLET):
timestamp = datetime.utcnow()
if algo_name == su.ALGO_KARGER:
sdk.flag_spam(item, user, timestamp, session)
else:
sdd.flag_spam(item, user, timestamp, session)
def raise_ham_flag(item, user, session, algo_name=su.ALGO_DIRICHLET):
timestamp = datetime.utcnow()
if algo_name == su.ALGO_KARGER:
sdk.flag_ham(item, user, timestamp, session)
else:
sdd.flag_ham(item, user, timestamp, session)
def suggest_n_users_to_review(item, n, session):
if item is None or item.page_url is None:
return []
n_users = hitsDB.suggest_n_users_to_review(item, n, session)
if len(n_users) < n:
# todo(michael): do random sampling (or some criteria)
pass
return n_users
def get_n_items_for_spam_mm_randomly(n, session):
return ItemMixin.cls.get_n_items_for_spam_mm_randomly(n, session)
def delete_spam_item_by_author(item, session, algo_name=su.ALGO_DIRICHLET):
""" If item is deleted by author then there is no reputation damage to the
author, plus users who flagged it receive boost to base reliability.
"""
if item.action_twin is not None:
# If the item is also an action, delete the action first.
if item.action_twin.type == ACTION_UPVOTE:
item.parent.author.mm_vote_counter -= 1
item.parent.author.vote_counter -= 1
elif item.action_twin.type == ACTION_DOWNVOTE:
item.parent.author.mm_vote_counter += 1
item.parent.author.vote_counter += 1
else:
raise Exception("Unknown action: %s" % item.action_twin)
session.delete(item.action_twin)
session.flush()
# Okay, deletes the item.
if algo_name == su.ALGO_KARGER:
sdk.delete_spam_item_by_author(item, session)
elif algo_name == su.ALGO_DIRICHLET:
sdd.delete_spam_item_by_author(item, session)
else:
raise Exception("Unknown algorithm!")
def add_item(page_url, item_id, user, session, parent_id=None, action_type=None,
spam_detect_algo=su.ALGO_DIRICHLET):
""" Creates an item and adds it to the db."""
annot = ItemMixin.cls(page_url, item_id, user, parent_id=parent_id,
spam_detect_algo=spam_detect_algo)
session.add(annot)
session.flush()
# If the annotation is action, then create and bind the action.
if action_type is not None:
if parent_id is None:
raise Exception("New annotation which is action should have a parent!")
act = ActionMixin.cls(parent_id, user.id, action_type,
datetime.utcnow(), item_twin_id=annot.id)
item = ItemMixin.cls.get_item(parent_id, session)
if action_type == ACTION_UPVOTE:
item.author.mm_vote_counter += 1
item.author.vote_counter += 1
elif action_type == ACTION_DOWNVOTE:
item.author.mm_vote_counter -= 1
item.author.vote_counter -= 1
else:
raise Exception("Action should be whether upvote or donwvote!")
session.add(act)
session.flush()
return annot
def get_add_item(page_url, item_id, user, session, parent_id=None,
action_type=None, spam_detect_algo=su.ALGO_DIRICHLET):
annot = ItemMixin.cls.get_item(item_id, session)
# If annotation does not exist then create it.
if annot is None:
annot = add_item(page_url, item_id, user, session, parent_id=parent_id,
action_type=action_type, spam_detect_algo=spam_detect_algo)
return annot
def delete_item(item, session):
# If the item is action, then delete this action and then delete the item.
if item.children is not None and len(item.children) != 0:
# We cannot delete the item, it has subitems
        print 'children', item.children
print 'inside'
return
if item.action_twin is not None:
if item.action_twin.type == ACTION_UPVOTE:
item.parent.author.mm_vote_counter -= 1
item.parent.author.vote_counter -= 1
elif item.action_twin.type == ACTION_DOWNVOTE:
item.parent.author.mm_vote_counter += 1
item.parent.author.vote_counter += 1
else:
raise Exception("Unknown action: %s" % item.action_twin)
session.delete(item.action_twin)
session.delete(item)
session.flush()
def get_add_user(user_id, session):
""" The function retruns a user by its id (string), if the user record
does not exist then the function creates it and retunrs user object."""
user = UserMixin.cls.get_user(user_id, session)
if user is None:
user = UserMixin.cls(user_id)
session.add(user)
session.flush()
return user
def upvote(item, user, session):
# Checks whether the user has upvoted the item
upvote = ActionMixin.cls.get_action(item.id, user.id, ACTION_UPVOTE, session)
if upvote is not None:
# The item has been upvoted by the user.
return
# Undo downvote if it exists.
undo_downvote(item, user, session)
# Okay, upvoting fresh
act = ActionMixin.cls(item.id, user.id, ACTION_UPVOTE, datetime.utcnow())
# Increase item author's vote counter.
item.author.vote_counter += 1
raise_ham_flag(item, user, session)
session.add(act)
session.flush()
def downvote(item, user, session):
downvote = ActionMixin.cls.get_action(item.id, user.id, ACTION_DOWNVOTE, session)
if downvote is not None:
return
    # Undo upvote if it exists.
undo_upvote(item, user, session)
# Downvoting
act = ActionMixin.cls(item.id, user.id, ACTION_DOWNVOTE, datetime.utcnow())
# Decrease item author's vote counter
item.author.vote_counter -= 1
session.add(act)
session.flush()
def undo_upvote(item, user, session):
upvote = ActionMixin.cls.get_action(item.id, user.id, ACTION_UPVOTE, session)
if upvote is None:
# Nothing to do
return
item.author.vote_counter -= 1
if SPAM_ALGO == su.ALGO_KARGER:
sdk._undo_spam_ham_flag(item, user, session, spam_flag=False)
elif SPAM_ALGO == su.ALGO_DIRICHLET:
sdd._undo_spam_ham_flag(item, user, session, spam_flag=False)
else:
raise Exception("unknown algorithm")
session.delete(upvote)
session.flush()
def undo_downvote(item, user, session):
downvote = ActionMixin.cls.get_action(item.id, user.id, ACTION_DOWNVOTE, session)
if downvote is None:
# Nothing to do
return
item.author.vote_counter += 1
session.delete(downvote)
session.flush()
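# Illustrative wiring of this API (a sketch; the declarative base, session and
# sqlite URL below are assumptions, not part of this module):
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import scoped_session, sessionmaker
#     from sqlalchemy.ext.declarative import declarative_base
#
#     Base = declarative_base()
#     Session = scoped_session(sessionmaker())
#     engine = create_engine('sqlite://')
#     bind_engine(engine, Session, Base, should_create=False)
#     bootstrap(Base, create_all=True)
#     session = Session()
#
#     author = get_add_user('alice', session)
#     item = get_add_item('http://example.com/page', 'annot-1', author, session)
#     upvote(item, get_add_user('bob', session), session)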
| mshavlovsky/mannord | mannord/api.py | Python | bsd-2-clause | 8,226 |
# Copyright (C) 2013-2020 Internet Systems Consortium.
#
# Permission to use, copy, modify, and distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND INTERNET SYSTEMS CONSORTIUM
# DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# INTERNET SYSTEMS CONSORTIUM BE LIABLE FOR ANY SPECIAL, DIRECT,
# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
# FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
# WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# This file contains a number of common steps that are general and may be used
# By a lot of feature files.
#
import os
import sys
from forge_cfg import world, step
@step(r'stop process (\w+)')
def stop_a_named_process(process_name):
"""
Stop the process with the given name.
Parameters:
process_name ('process <name>'): Name of the process to stop.
"""
world.processes.stop_process(process_name)
@step(r'wait for (new )?(\w+) stderr message (\w+)(?: not (\w+))?')
def wait_for_err_message(new, process_name, message, not_message):
"""
Block until the given message is printed to the given process's stderr
output.
Parameter:
new: (' new', optional): Only check the output printed since last time
this step was used for this process.
process_name ('<name> stderr'): Name of the process to check the output of.
message ('message <message>'): Output (part) to wait for.
not_message ('not <message>'): Output (part) to wait for, and fail
Fails if the message is not found after 10 seconds.
"""
strings = [message]
if not_message is not None:
strings.append(not_message)
(found, line) = world.processes.wait_for_stderr_str(process_name, strings, new)
if not_message is not None:
assert found != not_message, line
@step(r'wait for (new )?(\w+) stdout message (\w+)(?: not (\w+))?')
def wait_for_out_message(new, process_name, message, not_message):
"""
Block until the given message is printed to the given process's stdout
output.
Parameter:
new: (' new', optional): Only check the output printed since last time
this step was used for this process.
    process_name ('<name> stdout'): Name of the process to check the output of.
message ('message <message>'): Output (part) to wait for, and succeed.
not_message ('not <message>'): Output (part) to wait for, and fail
Fails if the message is not found after 10 seconds.
"""
strings = [message]
if not_message is not None:
strings.append(not_message)
(found, line) = world.processes.wait_for_stdout_str(process_name, strings, new)
if not_message is not None:
assert found != not_message, line
@step(r'the file (\S+) should (not )?exist')
def check_existence(file_name, should_not_exist):
"""
Check the existence of the given file.
Parameters:
file_name ('file <name>'): File to check existence of.
should_not_exist ('not', optional): Whether it should or should not exist.
Fails if the file should exist and does not, or vice versa.
"""
if should_not_exist is None:
assert os.path.exists(file_name), file_name + " does not exist"
else:
assert not os.path.exists(file_name), file_name + " exists"
| isc-projects/forge | tests/config.py | Python | isc | 3,628 |
# This file is a part of MediaDrop (http://www.mediadrop.net),
# Copyright 2009-2015 MediaDrop contributors
# For the exact contribution history, see the git revision log.
# The source code contained in this file is licensed under the GPLv3 or
# (at your option) any later version.
# See LICENSE.txt in the main project directory, for more information.
"""
Tag-based Categorization
Content can be labelled in an ad-hoc fashion with tags. Typically tags will
be displayed on the frontend using a 'tag cloud', rather than listing all
tags. This means you can tag all you want!
"""
import re
from itertools import izip
from sqlalchemy import Table, Column, sql, func
from sqlalchemy.types import Unicode, Integer
from sqlalchemy.orm import mapper, validates
from mediadrop.model import SLUG_LENGTH, slugify
from mediadrop.model.meta import DBSession, metadata
from mediadrop.plugin import events
tags = Table('tags', metadata,
Column('id', Integer, autoincrement=True, primary_key=True),
Column('name', Unicode(50), unique=True, nullable=False),
Column('slug', Unicode(SLUG_LENGTH), unique=True, nullable=False),
mysql_engine='InnoDB',
mysql_charset='utf8'
)
class Tag(object):
"""
Tag (keyword) for labelling content
.. attribute:: id
.. attribute:: name
Display name
.. attribute:: slug
A unique URL-friendly permalink string for looking up this object.
.. attribute:: media_content
.. attribute:: media_count_published
"""
query = DBSession.query_property()
def __init__(self, name=None, slug=None):
self.name = name or None
self.slug = slug or name or None
def __repr__(self):
return '<Tag: %r>' % self.name
def __unicode__(self):
return self.name
@validates('slug')
def validate_slug(self, key, slug):
return slugify(slug)
class TagList(list):
"""
List for easy rendering
Automatically prints the contained tags separated by commas::
>>> tags = TagList(['abc', 'def', 'ghi'])
>>> tags
abc, def, ghi
"""
def __unicode__(self):
        return ', '.join(tag.name for tag in self)
mapper(Tag, tags, order_by=tags.c.name, extension=events.MapperObserver(events.Tag))
excess_whitespace = re.compile('\s\s+', re.M)
def extract_tags(string):
"""Convert a comma separated string into a list of tag names.
NOTE: The space-stripping here is necessary to patch a leaky abstraction.
MySQL's string comparison with varchar columns is pretty fuzzy
when it comes to space characters, and is even inconsistent between
versions. We strip all preceding/trailing/duplicated spaces to be
safe.
"""
# count linebreaks as commas -- we assume user negligence
string = string.replace("\n", ',')
    # collapse repeated whitespace into a single space
string = excess_whitespace.sub(' ', string)
# make a tags list without any preceding and trailing whitespace
tags = [tag.strip() for tag in string.split(',')]
# remove duplicate and empty tags
tags = set(tag for tag in tags if tag)
return list(tags)
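# For illustration (hypothetical input): extract_tags(u'python, web,\npython, ')
# yields [u'python', u'web'] in arbitrary order -- duplicates and empty entries
# are dropped because a set is built before returning the list.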
def fetch_and_create_tags(tag_names):
"""Return a list of Tag instances that match the given names.
Tag names that don't yet exist are created automatically and
returned alongside the results that did already exist.
If you try to create a new tag that would have the same slug
as an already existing tag, the existing tag is used instead.
:param tag_names: The display :attr:`Tag.name`
:type tag_names: list
:returns: A list of :class:`Tag` instances.
:rtype: :class:`TagList` instance
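    Example (tag names are purely illustrative)::
        tags = fetch_and_create_tags([u'Python', u'Django'])
        # existing rows are reused, missing ones are inserted in bulk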
"""
lower_names = [name.lower() for name in tag_names]
slugs = [slugify(name) for name in lower_names]
    # Grab all the tags that exist already, whether it's the name or the slug
# that matches. Slugs can be changed by the tag settings UI so we can't
# rely on each tag name evaluating to the same slug every time.
results = Tag.query.filter(sql.or_(func.lower(Tag.name).in_(lower_names),
Tag.slug.in_(slugs))).all()
# Filter out any tag names that already exist (case insensitive), and
# any tag names evaluate to slugs that already exist.
for tag in results:
# Remove the match from our three lists until its completely gone
while True:
try:
try:
index = slugs.index(tag.slug)
except ValueError:
index = lower_names.index(tag.name.lower())
tag_names.pop(index)
lower_names.pop(index)
slugs.pop(index)
except ValueError:
break
# Any remaining tag names need to be created.
if tag_names:
# We may still have multiple tag names which evaluate to the same slug.
# Load it into a dict so that duplicates are overwritten.
uniques = dict((slug, name) for slug, name in izip(slugs, tag_names))
# Do a bulk insert to create the tag rows.
new_tags = [{'name': n, 'slug': s} for s, n in uniques.iteritems()]
DBSession.execute(tags.insert(), new_tags)
DBSession.flush()
# Query for our newly created rows and append them to our result set.
results += Tag.query.filter(Tag.slug.in_(uniques.keys())).all()
return results
| jobsafran/mediadrop | mediadrop/model/tags.py | Python | gpl-3.0 | 5,446 |
import fiona
import os
from shapely.geometry import shape
from shapely.geometry import mapping
import arcpy
import os
def select_tiles(country, footprint):
tile_list = []
with fiona.open(footprint, 'r') as grid:
with fiona.open(country, 'r') as country:
# compare each feature in dataset 1 and 2
for g in grid:
tileid = g['properties']['Name'][-8:]
for i in country:
# print tile ID if geometry intersects
if shape(g['geometry']).intersects(shape(i['geometry'])):
#print "{}: intersects".format(tileid)
tile_list.append(tileid)
else:
pass
#print "{}: doesn't intersect".format(tileid)
return tile_list
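# Illustrative call (both paths are hypothetical shapefiles):
#
#     tiles = select_tiles(r"c:\data\gin_adm0.shp", r"c:\data\tile_footprint.shp")
#     # -> e.g. ['10N_010W', '10N_020W']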
def clipped_mask_list(tile_list, country_shapefile, datadir):
clipped_list = []
for tileid in tile_list:
mask_tile = os.path.join(r"s:\masks", tileid + ".shp")
clipped_mask = tileid + "_clip.shp"
clipped_mask_path = os.path.join(datadir, clipped_mask)
arcpy.Clip_analysis(mask_tile,country_shapefile, clipped_mask_path)
clipped_list.append(clipped_mask_path)
return clipped_list
def merge_clipped_masks(clipped_list, datadir, iso):
merged_masks = os.path.join(datadir, iso + "_merged_mask.shp")
arcpy.Merge_management(clipped_list, merged_masks)
return merged_masks
def merge_polygon_simplify(merged_masks, datadir, iso):
simp_masks = os.path.join(datadir, iso + "_tcd_mask.shp")
arcpy.SimplifyPolygon_cartography(merged_masks, simp_masks, "BEND_SIMPLIFY", "500 Meters", "100 Hectares")
return simp_masks
def update_remap_table(remap_table, shortyear):
with arcpy.da.UpdateCursor(remap_table, "from_") as cursor:
for row in cursor:
row[0] = shortyear
cursor.updateRow(row)
def update_reclass_function(lossyearmosaic, year_remap_function):
print "removing function"
arcpy.EditRasterFunction_management(lossyearmosaic, "EDIT_MOSAIC_DATASET", "REMOVE", year_remap_function)
print "inserting function"
arcpy.EditRasterFunction_management(lossyearmosaic, "EDIT_MOSAIC_DATASET", "INSERT", year_remap_function)
def create_mosaic(country_loss_30tcd, scratch_gdb):
out_cs = arcpy.SpatialReference(4326)
mosaic_name = "mosaic_country_loss_30tcd"
mosaic_path = os.path.join(scratch_gdb, mosaic_name)
arcpy.CreateMosaicDataset_management(scratch_gdb, mosaic_name, out_cs)
arcpy.AddRastersToMosaicDataset_management(mosaic_path, "Raster Dataset", country_loss_30tcd)
return os.path.join(scratch_gdb, mosaic_name)
| elizabethgoldman/emerging_hotspots | utilities.py | Python | apache-2.0 | 2,694 |
# -*- coding: utf-8 -*-
#
# Neural Monkey documentation build configuration file, created by
# sphinx-quickstart on Wed Aug 31 14:49:25 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.intersphinx'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Neural Monkey'
copyright = u'2016, Jindřich Libovický, Jindřich Helcl, Tomáš Musil'
author = u'Jindřich Libovický, Jindřich Helcl, Tomáš Musil'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.1'
# The full version, including alpha/beta/rc tags.
release = u'0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'img/gorilla-logo-half.png'
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'img/gorilla.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'NeuralMonkeydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'NeuralMonkey.tex', u'Neural Monkey Documentation',
u'Jindřich Libovický, Jindřich Helcl, Tomáš Musil', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'neuralmonkey', u'Neural Monkey Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'NeuralMonkey', u'Neural Monkey Documentation',
author, 'NeuralMonkey', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
def run_apidoc(_):
cur_dir = os.path.abspath(os.path.dirname(__file__))
print(cur_dir)
module = os.path.abspath(os.path.join(cur_dir, "..", "..", "neuralmonkey"))
print(module)
from sphinx.apidoc import main
main(['--separate', '-o', cur_dir, module, '--force'])
def skip(app, what, name, obj, skip, options):
if name == '__init__':
return False
return skip
def setup(app):
app.connect('autodoc-skip-member', skip)
app.connect('builder-inited', run_apidoc)
| ufal/neuralmonkey | docs/source/conf.py | Python | bsd-3-clause | 10,109 |
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ericsson AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from calvin.actor.actor import Actor, manage, condition, stateguard, calvinsys
class Knob(Actor):
"""
Read a knob to see which way it turned.
Outputs:
direction: clockwise or anti-clockwise
"""
@manage([])
def init(self):
self.setup()
def setup(self):
self._knob = calvinsys.open(self, "io.knob")
def will_migrate(self):
calvinsys.close(self._knob)
self._knob = None
def will_end(self):
if self._knob:
calvinsys.close(self._knob)
def did_migrate(self):
self.setup()
@stateguard(lambda self: calvinsys.can_read(self._knob))
@condition([], ["direction"])
def trigger(self):
return (calvinsys.read(self._knob),)
action_priority = (trigger, )
requires = ['io.knob']
test_calvinsys = {'io.knob': {'read': [-1, 1, 0, 1]}}
test_set = [
{
'outports': {'direction': [-1, 1, 0, 1]}
}
]
| EricssonResearch/calvin-base | calvin/actorstore/systemactors/io/Knob.py | Python | apache-2.0 | 1,570 |
# -*- python -*-
#
# This file is part of the cinapps.tcell package
#
# Copyright (c) 2012-2014 - EMBL-EBI
#
# File author(s): Thomas Cokelaer ([email protected])
#
# Distributed under the GLPv3 License.
# See accompanying file LICENSE.txt or copy at
# http://www.gnu.org/licenses/gpl-3.0.html
#
# website: www.cellnopt.org
#
##############################################################################
from __future__ import print_function
from cno import CNOError
from cno.io.sbml import SBML
from cno.io.sif import SIF
from cno.io.cnograph import CNOGraph
from cno.io.reactions import Reaction, Reactions
import bs4
import lxml
__all__ = ["SBMLQual"]
class SBMLQual(object):
"""Class to read and write SBML-qual file (logical models only)
This is not an interface to SBML or SBML-qual. See libsbml library for
that purpose. With this class you can read and write logical models
stored in SBML-qual format.
We do not guarantee that it covers all functionalities of SBML-qual but
files saved with this class can be read back to be used within CellNOpt.
You can convert CNOGraph of SIF instances to SBML-qual as follows::
.. plot::
:include-source:
:width: 80%
from cno import CNOGraph
c1 = CNOGraph()
c1.add_reaction("A+B=C")
c1.expand_and_gates()
c1.to_sbmlqual('test.xml')
c1.plot()
c2 = CNOGraph("test.xml')
assert c1 == c2
c2.plot()
"""
def __init__(self):
self.and_symbol = "^"
def to_sbmlqual(self, graph, filename=None):
"""Exports SIF to SBMLqual format.
:return: the SBML text
This is a level3, version 1 exporter.
::
>>> s = SIF()
>>> s.add_reaction("A=B")
            >>> res = s.to_sbmlqual("test.xml")
"""
s = SBML(self, version="1", model_name='cellnopt_model')
if isinstance(graph, SIF):
data = graph.to_cnograph()
elif isinstance(graph, CNOGraph):
data = graph
else:
raise CNOError("Expected a CNOGraph of SIF intance as input")
sbml = s.create_header()
sbml += s.create_model_name()
sbml += s.create_compartment(id="main", constant="true")
# add the qualitativeSpecies list
qualitativeSpecies = QualitativeSpecies(data)
sbml += qualitativeSpecies.create()
# Starting list of transitions
list_of_transition = ListOfTransitions()
sbml += list_of_transition.open()
# Loop over all transitions
tid = 0
for node in sorted(data.nodes()):
predecessors = list(data.predecessors(node))
reactions = data.predecessors_as_reactions(node)
if self.and_symbol in node:
# if the output is a logical and, we skip the transition
# it will be taken into account when it is an input
continue
if len(predecessors) == 0:
continue # nothing to do, this is a source
# else we have a new transition. We increment the identifier
tid += 1
identifier = "t{0}".format(tid)
# and create a Transition
transition = Transition(identifier)
sbml += transition.open()
# the list of inputs
# - inputs could be an AND gate (e.g., A^!B=C), in which case, we want the
# species A and B to be extracted
# - inputs could be made of positive and neg from same species (e.g., A=A
# and !A=A ), which means two entries in the list of inputs.
species = {'-':[], '+':[]}
for pred in predecessors:
if self.and_symbol in pred:
d = Reaction(pred).get_signed_lhs_species()
else:
if data[pred][node]['link'] == '+':
d = {'+': [pred]}
else:
d = {'-': [pred]}
if '-' in d.keys():
species['-'].extend(d['-'])
if '+' in d.keys():
species['+'].extend(d['+'])
for k in species.keys():
species[k] = list(set(species[k]))
list_of_inputs = ListOfInputs(species, identifier)
sbml += list_of_inputs.create()
# The output (only one)
list_of_outputs = ListOfOutputs(node)
sbml += list_of_outputs.create()
# Now the list of functions. This is the most complicated
# but at the same time, we are lucky enough that in logical
# models, the list of functions (at least in cellnopt) is
# made of only one function, which is made of ORs. Inside
            # the ORs, you could have several ANDs
list_of_function_terms = ListOfFunctionTerms()
sbml += list_of_function_terms.open()
sbml += list_of_function_terms.create_default_term()
# there will be only one function term
# if there is only one AND, starts with \and
# else with \ors
function_term = FunctionTerm()
sbml += function_term.open()
sbml += """<math xmlns="http://www.w3.org/1998/Math/MathML">"""
if len(predecessors) == 1 and self.and_symbol in predecessors[0]:
# a pure AND gate
mathml = MathAND(predecessors[0], identifier)
sbml += mathml.create()
elif len(predecessors) == 1 and self.and_symbol not in predecessors[0]:
# a direct link (no ORs, no ANDs)
lhs = Reaction(reactions[0]).lhs
sign = Reaction(reactions[0]).sign
if sign == '1':
sign = '+'
else:
sign = '-'
lhs = lhs.replace("!", "")
mathml = MathApply(lhs, identifier, sign=sign)
sbml += mathml.create()
else: # an OR gate
# inside the OR tag, you could have other gates
# that is MathAND or MathApply
# need to build a data structure that contains
# the type of links. Needed for ORs only. ANDs
# already contain the information in the name
mathml = MathOR(reactions, identifier)
sbml += mathml.create()
sbml += "</math>"
sbml += function_term.close()
sbml += list_of_function_terms.close()
sbml += transition.close()
# The end
sbml += list_of_transition.close()
sbml += """</model>\n"""
sbml += s.create_footer()
if filename is not None:
import lxml
parser = lxml.etree.XMLParser(remove_blank_text=True)
from io import StringIO
tree = lxml.etree.parse(StringIO(sbml), parser)
tree.write(filename, pretty_print=True)
else:
return sbml
def _prettify(self, sbml):
"""Return a pretty-printed XML string for the Element."""
# beautifulsoup does a much better job than minidom but all tags are
# transformed into lowercase.
return bs4.BeautifulSoup(sbml, "lxml").prettify()
# not always the best layout
#from xml.dom import minidom
#reparsed = minidom.parseString(sbml)
#return reparsed.toprettyxml(indent=" ", newl='')
def read_sbmlqual(self, filename):
"""import SBMLQual XML file into a SIF instance
:param str filename: the filename of the SBMLQual
:param bool clear: remove all existing nodes and edges
.. warning:: experimental
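        Example (assuming ``model.xml`` was written by :meth:`to_sbmlqual`)::
            s = SBMLQual()
            sif = s.read_sbmlqual('model.xml')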
"""
# We could just use XML.etree. BeautifulSoup loses the upper cases while
# reading. Although this is an issue when writing, reading is not. This
        # is actually better because it is more robust.
sif = SIF()
res = bs4.BeautifulSoup(open(filename).read(), 'lxml')
# First, let us get the node names
#model = res.findAll("model")[0]
#allspecies = model.findAll("qual:listofqualitativespecies")[0]
nodes = [ x.get('qual:id') for x in res.findChildren("qual:qualitativespecies")]
# Then, we go through all function terms
for transition in res.findChildren("qual:transition"):
inputs = [x['qual:qualitativespecies'] for x in transition.findChildren("qual:input")]
signs = [x['qual:sign'] for x in transition.findChildren("qual:input")]
output = [x['qual:qualitativespecies'] for x in transition.findChildren("qual:output")]
assert len(output) == 1
assert len(inputs) == len(signs)
outputs = output * len(signs)
# there may be different functions so we will need to loop over them
functions = transition.findChildren("qual:functionterm")
if len(functions)>1:
CNOError("SBMLQual from cellnopt does not handle multiple functions")
contents = functions[0].findChild('apply')
if contents.find('and') and not contents.find('or'):
lhs = self._get_lhs_from_apply(contents)
reaction = lhs + "=" + outputs[0]
sif.add_reaction(str(reaction))
elif contents.find('or') and not contents.find('and'):
lhs = self._get_lhs_from_apply(contents)
reaction = lhs + "=" + outputs[0]
sif.add_reaction(str(reaction))
elif contents.find('or') is None and contents.find('and') is None:
lhs = self._get_lhs_from_apply(contents)
reaction = lhs + "=" + outputs[0]
sif.add_reaction(str(reaction))
else: #mulitple ORs
for content in contents.findChildren('apply', recursive=False):
lhs = self._get_lhs_from_apply(content)
reaction = lhs + "=" + outputs[0]
sif.add_reaction(str(reaction))
# sanity check
for node in nodes:
if node not in sif.species:
raise CNOError("A species without transition is not included in the network")
return sif
def _get_lhs_from_apply(self, xml):
entries = xml.findChildren('apply', recursive=False)
if len(entries) == 0:
entries = [xml]
lhs = []
for entry in entries:
if entry.find('geq') is not None:
name = entry.find('ci').text.strip()
lhs.append(name)
else:
name = entry.find('ci').text.strip()
lhs.append("!" + name)
if xml.find('and') is not None:
lhs = "^".join(list(set(lhs)))
else:
lhs = "+".join(list(set(lhs)))
return lhs
# NO NEED TO EXPORT ALL FOLLOWING CLASSES
# SBML-qual classes for logical modelling
class Qual(object):
version = "http://www.sbml.org/sbml/level3/version1/qual/version1"
def __init__(self, tag, xmlns=False):
self.tag = tag
self.xmlns = xmlns
self.open_attribute = {}
self.indent = ""
#self.version = '1'
def open(self):
if self.xmlns is False:
txt = """<qual:{0}""".format(self.tag)
for k,v in self.open_attribute.items():
txt+= """ qual:{0}="{1}" """.format(k,v) # note the space before 'qual'
txt += ">\n"
else:
txt = """<qual:{0} xmlns:qual="{1}">""".format(self.tag, self.version)
txt += "\n"
return txt
def close(self):
return """</qual:{0}>\n""".format(self.tag)
def indentation(self, sbml):
sbml = "".join([self.indent + x for x in sbml.split("\n")])
return sbml
class QualitativeSpecies(Qual):
def __init__(self, species):
super(QualitativeSpecies, self).__init__("listOfQualitativeSpecies", xmlns=True)
self.species = species
self.compartment = 'main'
self.constant = 'false'
def add_species(self, name):
sbml = """<qual:qualitativeSpecies """
sbml += """qual:constant="{0}" """.format(self.constant)
sbml += """qual:compartment="{0}" """.format(self.compartment)
sbml += """qual:id="{0}"/>\n""".format(name)
return sbml
def close(self):
return """</qual:{0}>\n""".format(self.tag)
def create(self):
sbml = self.open()
for name in self.species:
if "^" not in name:
sbml += self.add_species(name)
sbml += self.close()
sbml = self.indentation(sbml)
return sbml
class ListOfTransitions(Qual):
def __init__(self):
super(ListOfTransitions, self).__init__("listOfTransitions", xmlns=True)
class Transition(Qual):
"""A transition contains at most one ListOfOnputs and one ListofOutputs and
exactly one ListOfFunctionTerms
A transition defines the level associated withthe QualitativeSpecies that occur
when a Transition is enabled.
In logical models a Transition is used to specify the logical rule associated with a
QualitativeSpecies (that appears as an Output of this Transition). For example, the rule
if A > 1: B = 2 would be encapsulated as a Transition with 2 QualitativeSpecies **A** as
an input and **B** as an Output; if A > 1 rule being encode by the math element of a
3 FunctionTerm with the resultLevel attribute having a value 2.
In Petri net models a Transition is interpreted, using the common Petri net
semantics, as events that might occur within the system causing tokens to be moved.
"""
def __init__(self, identifier):
super(Transition, self).__init__("transition")
self.identifier = identifier
self.open_attribute = {'id':self.identifier}
class ListOfInputs(Qual):
"""The ListOfInputs contains at least one element of type Input.
The input parameter **species** is a dictionary with keys + and - containing lists
of species in each category. A species could be in both categories.
"""
def __init__(self, species, identifier):
super(ListOfInputs, self).__init__("listOfInputs")
self.species = species
self.identifier = identifier
assert '+' in self.species.keys()
assert '-' in self.species.keys()
self.threshold = 1
self.transitionEffect = 'none'
def create(self):
txt = self.open()
# positive and then negative:
prefix = """<qual:input qual:thresholdLevel="{0}" """.format(self.threshold)
prefix += """ qual:transitionEffect="{0}" """.format(self.transitionEffect)
for name in self.species['+']:
txt += prefix
txt += """ qual:sign="positive" """
txt += """ qual:qualitativeSpecies="{0}" """.format(name)
txt += """ qual:id="theta_{0}_{1}"/>""".format(self.identifier, name)
for name in self.species['-']:
txt += prefix
txt += """ qual:sign="negative" """
txt += """ qual:qualitativeSpecies="{0}" """.format(name)
txt += """ qual:id="theta_{0}_{1}"/>""".format(self.identifier, name)
txt += self.close()
return txt
class ListOfOutputs(Qual):
"""In logical model, there is only one output
* thresholdLevel is set to 1
* transitionEffect is set to assignmentLevel
"""
def __init__(self, node):
super(ListOfOutputs, self).__init__('listOfOutputs')
self.name = node
def create(self):
txt = self.open()
txt += """<qual:output """
txt += """ qual:transitionEffect="assignmentLevel" """
txt += """ qual:qualitativeSpecies="{0}"/>\n""".format(self.name)
txt += self.close()
return txt
class ListOfFunctionTerms(Qual):
"""
contains one default term and any number of function terms
"""
def __init__(self):
super(ListOfFunctionTerms, self).__init__('listOfFunctionTerms')
def create_default_term(self):
default = DefaultTerm()
return default.create()
#def add_list_function_term(self):
# raise NotImplementedError
class FunctionTerm(Qual):
"""associated with a result and to a boolean function inside a math element
that can be used to set the conditions inder which this term is selected
"""
def __init__(self):
super(FunctionTerm, self).__init__('functionTerm')
self.open_attribute = {'resultLevel': '1'}
class MathApply(object):
def __init__(self, name, identifier, sign="+"):
self.name = name
self.identifier = identifier
assert sign in ['+', '-']
self.sign = sign
def create(self):
txt = "<apply>\n"
if self.sign == '+':
txt += "<geq/>\n"
else:
txt += "<lt/>\n"
txt += "<ci> {0} </ci>\n".format(self.name)
txt += "<ci> theta_{0}_{1} </ci>\n".format(self.identifier, self.name)
txt += "</apply>\n"
return txt
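# For example (a sketch; "t1" is a hypothetical transition identifier),
# MathApply("A", "t1", "+").create() produces:
#
#   <apply>
#   <geq/>
#   <ci> A </ci>
#   <ci> theta_t1_A </ci>
#   </apply>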
class MathOR(object):
def __init__(self, reactions, identifier):
self.reactions = Reactions(reactions)
self.identifier = identifier
def create(self):
txt = '<apply>\n'
txt += '<or/>\n'
for reaction in self.reactions._reactions:
if "^" in reaction.name:
ml = MathAND(reaction.name, self.identifier)
txt += ml.create()
else:
if reaction.sign == '1':
sign = '+'
else:
sign = '-'
name = reaction.lhs
name = name.replace("!", "")
ml = MathApply(name, self.identifier, sign)
txt += ml.create()
txt += '</apply>\n'
return txt
class MathAND(object):
"""Get MathML representation of an AND gate.
"""
def __init__(self, reaction, identifier):
"""
identifier is the transition identifier
"""
self.reaction = Reaction(reaction)
self.identifier = identifier
def create(self):
txt = '<apply>\n'
txt += '<and/>\n'
species = self.reaction.get_signed_lhs_species()
for name in species['+']:
mathapply = MathApply(name, self.identifier)
txt += mathapply.create()
for name in species['-']:
mathapply = MathApply(name, self.identifier, '-')
txt += mathapply.create()
txt += '</apply>\n'
return txt
#class Math(Qual):
# def __init__(self):
# super(Math,self).__init__('math')##
#
# def open(self):
# return """<math xmlns="http://www.w3.org/1998/Math/MathML">"""
class DefaultTerm(Qual):
"""resultLevel is set to 0"""
def __init__(self):
super(DefaultTerm, self).__init__('defaultTerm')
self.open_attribute = {'resultLevel': 0}
def create(self):
txt = self.open()
txt += self.close()
return txt
| cellnopt/cellnopt | cno/io/sbmlqual.py | Python | bsd-2-clause | 19,177 |
'''
Copyright 2013 Mark Dredze. All rights reserved.
This software is released under the 2-clause BSD license.
Mark Dredze, [email protected]
'''
# Input- a whitelist for each area, a list of submissions per area, csv file with reviewer signups
# Each area must add the people it approves of to its whitelist, using my other script
# Merge reviewer duplicates on input
# Algorithm:
# Find choices for every reviewer that respect the whitelist.
# Find every reviewer with only a single choice overall and assign them to their area
# assume a default of X papers by reviewer
# keep track of the number of reviewers assigned to each area and the number needed by each area
# for reviewers with 2 choices
# round robin between areas- for each area, find a reviewer who wants that area as #1, pick them
# Keep selecting area until it is full
# If no first round, then pick a second choice reviewer
# Once all areas are full, continue assignment but put an area in the round robin list in proportion to the number of reviewers they need
# Start with an assumption of 3 * number of papers / load (4) but load is adjustable
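#
# Worked sizing example (hypothetical numbers): with 800 submissions and the
# default load of 4 papers per reviewer, the starting reviewer-pool estimate
# is 3 * 800 / 4 = 600 reviewers across all areas.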
from acl_check_reviewers import selectAreaName
import sys, os, csv, re, glob, random
class CsvLoader:
def __init__(self, filename):
self.__loadFile(filename)
def __mapColumns(self, entry):
self.names_to_columns = {}
self.columns_to_names = {}
for ii, name in enumerate(entry):
self.names_to_columns[name.lower()] = ii
self.columns_to_names[ii] = name.lower()
def getColumn(self, entry, column):
column_id = self.names_to_columns[column]
if column_id >= len(entry):
return ''
return entry[column_id]
def getColumnNames(self):
return self.names_to_columns.keys()
def __loadFile(self, filename):
file = open(filename)
reader = csv.reader(file)
self.contents = []
for ii, entry in enumerate(reader):
if ii == 0:
self.__mapColumns(entry)
continue
line = {}
for jj in range(len(entry)):
line[self.columns_to_names[jj]] = entry[jj]
self.contents.append(line)
file.close()
def __iter__(self):
return self.contents.__iter__()
def __len__(self):
return len(self.contents)
class ACLAssignGreedyReviewers:
def __init__(self):
self.want_string = 'Want to review (1st Choices)'
self.willing_string = 'Willing to review (2nd Choices)'
self.will_not_string = 'Will not review'
self.name_field = 'name'
self.email_field = 'email'
self.start_account_username = 'start account username'
pass
def loadReviewerInformation(self, reviewer_csv_filename):
csv_loader = CsvLoader(reviewer_csv_filename)
column_names = csv_loader.getColumnNames()
if 'name (first last)' in column_names:
self.name_field = 'name (first last)'
if 'email address' in column_names:
self.email_field = 'email address'
area_entry_to_name = {}
for entry in column_names:
if entry.startswith('areas ['):
area_name = re.search('areas \[(.+?) \(', entry).group(1)
area_entry_to_name[entry] = area_name
names = set()
emails = set()
reviewer_to_area_choices = {}
emails_to_reviewer_id_dict = {}
from_reviewer_id_dict = {}
reviewer_to_load = {}
num_lines = 0
for entry in csv_loader:
num_lines += 1
if len(entry) == 0:
continue
name = entry[self.name_field]
email = entry[self.email_field].lower().strip()
start_account_username = entry[self.start_account_username]
load_for_reviewer = entry['reduced review load (optional)']
reviewer_id = name.replace(' ', '_') + '_' + email.replace(' ', '_')
# Is this a valid email address.
if '@' not in email or ' ' in email:
print 'Warning: Invalid email: %s (%s)' % (name, email)
duplicate_reviewer = False
if reviewer_id in from_reviewer_id_dict or name in names or email in emails:
if email in emails_to_reviewer_id_dict:
reviewer_id = emails_to_reviewer_id_dict[email]
duplicate_reviewer = True
else:
print 'Warning: duplicate reviewer name: ', name
try:
load_for_reviewer = int(load_for_reviewer)
reviewer_to_load[reviewer_id] = load_for_reviewer
print 'Registered load limit for %s (%s): %d' % (name, email, load_for_reviewer)
except:
pass
names.add(name)
emails.add(email)
emails_to_reviewer_id_dict[email] = reviewer_id
tuple = (name, email, start_account_username)
from_reviewer_id_dict[reviewer_id] = tuple
area_choices = []
for area_entry, area_name in area_entry_to_name.iteritems():
if area_entry not in entry:
continue
choice = entry[area_entry]
rating = None
if choice == self.want_string:
rating = 1
elif choice == self.willing_string:
rating = 2
if rating != None:
area_choices.append((area_name, rating))
if duplicate_reviewer:
# Merge the reviewer's choices by always taking their higher choice.
old_area_choices = reviewer_to_area_choices[reviewer_id]
new_area_choices = {}
for area, rating in area_choices:
new_area_choices[area] = rating
for area, rating in old_area_choices:
new_area_choices[area] = max(new_area_choices.setdefault(area, 0), rating)
area_choices = []
for area, choice in new_area_choices.iteritems():
area_choices.append((area, choice))
reviewer_to_area_choices[reviewer_id] = area_choices
print 'Number of lines: %d' % num_lines
print 'Loaded %d/%d reviewers.' % (len(from_reviewer_id_dict), len(reviewer_to_area_choices))
return reviewer_to_area_choices, emails_to_reviewer_id_dict, from_reviewer_id_dict, reviewer_to_load
def selectReviewerForArea(self, area, reviewers_per_area_lists, used_reviewers):
while len(reviewers_per_area_lists[area]) > 0:
reviewer = reviewers_per_area_lists[area].pop(0)
if reviewer not in used_reviewers:
return reviewer
return None
# To handle differences in number of papers, we want to have some areas get multiple
# people per round so every area fills up at the same time.
# This method computes how many assignments are needed to reach that point.
def computeNumAreaAssignmentPerRound(self, area_to_load, area_to_num_papers, area_to_paper_load, priority_areas):
area_to_num_assignments_per_round = {}
# Compute how many reviewers are needed in each area.
reviewers_needed_per_area = {}
min = None
max = None
for area in area_to_num_papers.keys():
reviewers_needed_per_area[area] = (area_to_num_papers[area] * area_to_paper_load[area]) / area_to_load[area]
if min == None or min > reviewers_needed_per_area[area]:
min = reviewers_needed_per_area[area]
if max == None or max < reviewers_needed_per_area[area]:
max = reviewers_needed_per_area[area]
if max / min < 2:
# The max isn't even twice the min area, so most areas will end up with one assignment per round.
print 'Error: max is not greater than twice min. Using 1 for everything.'
# The min area gets one reviewer per round and every other area gets int(area/min)
for area, num_reviewers in reviewers_needed_per_area.iteritems():
area_to_num_assignments_per_round[area] = int(num_reviewers / min)
for area in area_to_num_assignments_per_round.keys():
if priority_areas != None and area in priority_areas:
area_to_num_assignments_per_round[area] *= self.increase_priority_factor
return area_to_num_assignments_per_round
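# Worked example (hypothetical numbers, following the arithmetic above): an
# area with 100 papers, a paper load of 3 reviews per paper and a reviewer
# load of 4 papers needs 100 * 3 / 4 = 75 reviewers. If the smallest area
# needs only 25 reviewers, this area receives int(75 / 25) = 3 assignments
# per round (multiplied by increase_priority_factor if it is a priority area).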
def assignReviewers(self, reviewers_per_area_lists, \
reviewer_load_constraint, area_to_load, area_to_num_papers, \
area_to_num_assignments_per_round, area_to_paper_load, \
assign_all_whitelist_reviewers_to_area, forced_reviewer_to_area, \
min_reviewers_per_area):
# For each area, we have a list of reviewers who selected that area ordered by first choice,
# the second choice, then by the number of total areas they picked.
# A dict between area name and the reviewers assigned to that area.
assignments = {}
areas = area_to_num_papers.keys()
print 'Assigning to %d areas.' % len(areas)
area_to_num_reviews_assigned = {}
for area in areas:
area_to_num_reviews_assigned[area] = 0
# In each pass, find reviewers who have only n selections. This saves the more flexible
# reviewers for later.
assignment_made = False
full_areas = set()
used_reviewers = set()
all_areas_full = False
all_areas_have_been_filled = False
if assign_all_whitelist_reviewers_to_area:
for area in assign_all_whitelist_reviewers_to_area:
# Give everyone to this area that they want.
while True:
reviewer = self.selectReviewerForArea(area, reviewers_per_area_lists, used_reviewers)
if not reviewer:
break
# Assign the reviewer to the area.
assignments.setdefault(area, set()).add(reviewer)
used_reviewers.add(reviewer)
# How many reviews did we just assign to this area?
if reviewer in reviewer_load_constraint:
load_constraint = reviewer_load_constraint[reviewer]
# A reviewer cannot exceed the load for an area.
this_reviewer_load = min(load_constraint, area_to_load[area])
if this_reviewer_load != area_to_load[area]:
print 'LOAD LIMIT for %s: %d instead of %d' % (reviewer, this_reviewer_load, area_to_load[area])
else:
# This isn't a constraint for this area. Remove it
del reviewer_load_constraint[reviewer]
else:
this_reviewer_load = area_to_load[area]
area_to_num_reviews_assigned[area] += this_reviewer_load
# Declare this area full.
full_areas.add(area)
# Assign reviewers to an area if they are forced to that area by special request.
for (reviewer, area) in forced_reviewer_to_area.iteritems():
assignments.setdefault(area, set()).add(reviewer)
used_reviewers.add(reviewer)
# How many reviews did we just assign to this area?
if reviewer in reviewer_load_constraint:
load_constraint = reviewer_load_constraint[reviewer]
# A reviewer cannot exceed the load for an area.
this_reviewer_load = min(load_constraint, area_to_load[area])
if this_reviewer_load != area_to_load[area]:
print 'LOAD LIMIT for %s: %d instead of %d' % (reviewer, this_reviewer_load, area_to_load[area])
else:
# This isn't a constraint for this area. Remove it
del reviewer_load_constraint[reviewer]
else:
this_reviewer_load = area_to_load[area]
area_to_num_reviews_assigned[area] += this_reviewer_load
# Does this area need more reviewers?
if area_to_num_reviews_assigned[area] >= \
(area_to_num_papers[area] * area_to_paper_load[area]) \
and not all_areas_full \
and min_reviewers_per_area <= len(assignments[area]):
full_areas.add(area)
while True:
assignment_made = False
#random.shuffle(areas)
for area in areas:
if area in full_areas and not all_areas_full:
continue
for ii in range(0, int(area_to_num_assignments_per_round[area])):
# Increase until we find a reviewer. or continue if we cannot.
reviewer = self.selectReviewerForArea(area, reviewers_per_area_lists, used_reviewers)
if not reviewer:
continue # We found no valid reviewer.
# Assign the reviewer to the area.
assignments.setdefault(area, set()).add(reviewer)
assignment_made = True
used_reviewers.add(reviewer)
# How many reviews did we just assign to this area?
if reviewer in reviewer_load_constraint:
load_constraint = reviewer_load_constraint[reviewer]
# A reviewer cannot exceed the load for an area.
this_reviewer_load = min(load_constraint, area_to_load[area])
if this_reviewer_load != area_to_load[area]:
#print 'LOAD LIMIT for %s: %d instead of %d' % (reviewer, this_reviewer_load, area_to_load[area])
pass
else:
# This isn't a constraint for this area. Remove it
del reviewer_load_constraint[reviewer]
else:
this_reviewer_load = area_to_load[area]
area_to_num_reviews_assigned[area] += this_reviewer_load
# Does this area need more reviewers?
if area_to_num_reviews_assigned[area] >= \
(area_to_num_papers[area] * area_to_paper_load[area]) \
and not all_areas_full \
and min_reviewers_per_area <= len(assignments[area]):
full_areas.add(area)
break
# Do we have enough assignments made?
if len(full_areas) == len(areas) and not all_areas_full:
all_areas_full = True
all_areas_have_been_filled = True
elif not assignment_made and not all_areas_full:
# The areas didn't just fill up, but no assignments were made.
# Start making all remaining assignments anyway.
all_areas_full = True
elif not assignment_made and all_areas_full:
# We can no longer make assignments and we've tried to make any assignment.
break
print 'Assignments finished.'
if all_areas_have_been_filled:
print 'All areas full.'
else:
print 'Not all areas full.'
area_list = []
for area in areas:
if area not in full_areas:
area_list.append(area)
print 'Needs reviewers: ' + ' | '.join(area_list)
areas.sort()
for area in areas:
coverage = area_to_num_reviews_assigned[area]/area_to_paper_load[area] / float(area_to_num_papers[area]) * 100
prefix = ''
if coverage < 100:
prefix='* '
print '%s%s (Reviewers: %d, Max review capacity: %d, Actual reviews needed: %d, Coverage: %.0f%%)' % (prefix, area, len(assignments[area]), area_to_num_reviews_assigned[area]/area_to_paper_load[area], area_to_num_papers[area], coverage)
return assignments, area_to_num_reviews_assigned
# Create a map between area and reviewer, with reviewers sorted by choice.
def createAreaReviewerLists(self, reviewer_to_area_choices, area_to_whitelist, accept_all_reviewers=False):
reviewers_per_area_lists = {}
area_to_total_possible_reviewers = {}
for reviewer, area_choices in reviewer_to_area_choices.iteritems():
for area, rating in area_choices:
if accept_all_reviewers or (area in area_to_whitelist and reviewer in area_to_whitelist[area]):
reviewers_per_area_lists.setdefault(area, []).append((rating, reviewer))
area_to_total_possible_reviewers[area] = area_to_total_possible_reviewers.setdefault(area, 0) + 1
for area, list in reviewers_per_area_lists.iteritems():
list.sort(reverse=False)
new_list = []
for num, reviewer in list:
new_list.append(reviewer)
reviewers_per_area_lists[area] = new_list
print 'Accepted reviewers per area (not including forced reviewers).'
for area in area_to_whitelist:
percent = float(len(reviewers_per_area_lists[area])) / float(area_to_total_possible_reviewers[area]) * 100
print '\t%s %d accepted / %d total (%.2f)' % (area, len(reviewers_per_area_lists[area]), area_to_total_possible_reviewers[area], percent)
return reviewers_per_area_lists
def getSecondArgument(self, line):
line = line.strip()
split_line = line.split('\t')
if len(split_line) < 2:
print 'Error on line: %s' % line
argument = split_line[1]
return argument
def loadWhitelists(self, whitelist_files, emails_to_reviewer_id_dict, forced_reviewer_to_area):
whitelists = {}
area_to_load = {}
area_to_paper_load = {}
for filename in whitelist_files:
print 'Loading whitelist: %s' % filename
file = open(filename)
lines = file.readlines()
file.close()
if not (lines[0].startswith('#') and lines[1].startswith('#') and lines[2].startswith('#')):
print 'Error in whitelist file. Missing # on first three lines. ', filename
area_name = self.getSecondArgument(lines[0])
area_to_load[area_name] = int(self.getSecondArgument(lines[1]))
area_to_paper_load[area_name] = int(self.getSecondArgument(lines[2]))
num_loaded_reviewers = 0
whitelists[area_name] = set()
for line in lines[3:]:
line = line.strip()
if line.startswith('#') or line == '':
continue
split_line = line.split('\t')
if len(split_line) != 2:
print 'Error on line: "%s"' % line
reviewer_name = split_line[0].strip()
reviewer_email = split_line[1].strip().lower()
if reviewer_email not in emails_to_reviewer_id_dict:
print 'Error: whitelist contains unknown reviewer: "%s" "%s"' % (reviewer_name, reviewer_email)
sys.exit()
if reviewer_name.startswith('*'):
reviewer_name = reviewer_name[1:]
print 'Forcing reviewer %s to area %s' % (reviewer_name, area_name)
reviewer_id = emails_to_reviewer_id_dict[reviewer_email]
if reviewer_id in forced_reviewer_to_area:
print 'Error. %s is being forced to multiple areas.' % reviewer_name
area_list_to_print = [forced_reviewer_to_area[reviewer_id], area_name]
print '\tAreas: %s' % '|'.join(area_list_to_print)
sys.exit()
else:
forced_reviewer_to_area[emails_to_reviewer_id_dict[reviewer_email]] = area_name
num_loaded_reviewers += 1
else:
whitelists[area_name].add(emails_to_reviewer_id_dict[reviewer_email])
num_loaded_reviewers += 1
print 'Loaded %d reviewers for area %s.' % (num_loaded_reviewers, area_name)
print 'Processed %d whitelists.' % len(whitelists)
return whitelists, area_to_load, area_to_paper_load
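# Illustrative whitelist file (hypothetical contents; the first three lines
# are headers whose second, tab-separated field is what getSecondArgument
# returns, and a leading '*' forces a reviewer into the area):
#
#   #area<TAB>Machine Translation
#   #load<TAB>4
#   #paper_load<TAB>3
#   Jane Doe<TAB>jane@example.org
#   *John Smith<TAB>john@example.org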
def getWhitelistFilenames(self, whitelist_files_prefix):
return glob.glob(whitelist_files_prefix + '*')
def printFinalAssignmentStats(self, output_filename_prefix, assignments, from_reviewer_id_dict, reviewer_load_constraint):
output = open(output_filename_prefix + '_all_list.csv', 'w')
#output.write('#name\temail\tmax papers to assign\tarea\n')
output.write('#username\temail\tfirst\tlast\ttrack\tmax papers to assign\n')
for area_name, reviewers in assignments.iteritems():
filename = area_name.replace(' ', '_').replace('/', '_').replace('&', '_')
area_output = open(output_filename_prefix + filename + '.csv', 'w')
area_output.write('#name\temail\tmax papers to assign\n')
for reviewer in reviewers:
reviewer_name, reviewer_email, start_account_username = from_reviewer_id_dict[reviewer]
load_constraint = ''
if reviewer in reviewer_load_constraint:
load_constraint = str(reviewer_load_constraint[reviewer])
firstname, lastname = reviewer_name.rsplit(' ', 1)
output.write('%s\t%s\t%s\t%s\t%s\t%s\n' % (start_account_username, reviewer_email, firstname, lastname, area_name, load_constraint))
area_output.write('%s\t%s\t%s\n' % (reviewer_name, reviewer_email, load_constraint))
area_output.close()
output.close()
# Process a file with one tab-separated entry per line:
# areaname \t num submissions
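# Illustrative file contents (hypothetical area names and counts):
#
#   machine translation<TAB>120
#   summarization<TAB>45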
def loadAreaStats(self, area_stats_filename):
file = open(area_stats_filename)
area_to_num_papers = {}
total_submissions = 0
for line in file:
line = line.strip()
if line.startswith('#') or line == '':
continue
print line
area_name, submissions = line.split('\t')
area_to_num_papers[area_name.lower()] = int(submissions)
total_submissions += int(submissions)
file.close()
print 'Total submissions: %d' % total_submissions
return area_to_num_papers
def computeReviewerStats(self, assignments, reviewer_to_area_choices):
total_choice_scores = 0
total_assigned = 0
rating_counts = [0,0]
assigned_reviewers = set()
for area_name, reviewer_list in assignments.iteritems():
for reviewer in reviewer_list:
assigned_reviewers.add(reviewer)
area_choices = reviewer_to_area_choices[reviewer]
for choice, rating in area_choices:
if choice == area_name:
total_choice_scores += rating
total_assigned += 1
rating_counts[rating-1] += 1
break
print 'Average choice rating: ' + str(float(total_choice_scores) / float(total_assigned))
print 'Reviewers with first choice: ' + str(rating_counts[0])
print 'Reviewers with second choice: ' + str(rating_counts[1])
print 'Assigned reviewers: ' + str(total_assigned)
print 'Total reviewers: ' + str(len(reviewer_to_area_choices))
# Who wasn't assigned?
unassigned_reviewers = set()
for reviewer in reviewer_to_area_choices.keys():
if reviewer not in assigned_reviewers:
unassigned_reviewers.add(reviewer)
print 'Unassigned reviewers: %s' % ', '.join(unassigned_reviewers)
def loadReviewerLoadConstraints(self, reviewer_load_constraints_files, email_to_reviewer_id):
reviewer_load_constraint = {}
for filename in reviewer_load_constraints_files:
print 'Loading constraints from file: %s' % filename
file = open(filename)
for line in file:
line = line.strip()
if line.startswith('#') or line == '':
continue
email, load = line.split('\t')
email = email.lower()
if email not in email_to_reviewer_id:
print 'Error: Loaded a constraint for %s but could not find this reviewer.' % email
else:
reviewer_id = email_to_reviewer_id[email]
reviewer_load_constraint[reviewer_id] = int(load)
file.close()
return reviewer_load_constraint
def run(self):
# Load the acl signup sheet
# Load a csv file with whitelists. each file should start with the area name on the first line
# load a reviewer constraint list.
# A file containing the number of submissions per area
# An output file to write the assignments
# A whitelist file should contain the area name on the first line, preceded by a pound. It then contains one reviewer per line (name and email address, tab-separated).
# reviewer_to_area_choices- filtered by whitelists. For each reviewer, a list of tuples with area and rating (1,2)
# area_to_load- the load per reviewer in each area
# area_to_num_papers- the number of papers in each area.
# area_to_num_assignments_per_round = {} # In the round robin assignment, give multiple reviewers to each
# area based on the number of reviewers they have total.
if len(sys.argv) != 5:
print 'Usage: %s reviewer_csv area_stats_filename whitelist_files_prefix output_filename_prefix' % sys.argv[0]
sys.exit()
reviewer_csv = sys.argv[1]
area_stats_filename = sys.argv[2]
whitelist_files_prefix = sys.argv[3]
output_filename_prefix = sys.argv[4]
#reviewer_load_constraints_prefix = sys.argv[5]
accept_all_reviewers = False
# Automatically give this area everyone in its whitelist that is available.
assign_all_whitelist_reviewers_to_area = None#set(['spoken language processing', 'nlp-enabled technology'])
# These areas receive extra reviewers on each round so they fill up.
priority_areas = None#set(['spoken language processing', 'nlp-enabled technology', 'multimodal nlp'])
self.increase_priority_factor = 2
# Small tracks may end up getting only a handful of reviewers. Ensure that there are at least this many
# reviewers per area
min_reviewers_per_area = 10
################################################
area_to_num_papers = self.loadAreaStats(area_stats_filename)
# Dedupe and normalize reviewer list
# emails_to_reviewer_id_dict # A dictionary between emails to reviewer ids.
# from_reviewer_id_dict # A dictionary containing reviewer names and emails (tuple) from reviewer id.
reviewer_to_area_choices, emails_to_reviewer_id_dict, from_reviewer_id_dict, reviewer_load_constraint = self.loadReviewerInformation(reviewer_csv)
# Load whitelists and normalize reviewers.
# A dictionary mapping area to a set of reviewer_ids
#reviewer_load_constraints_files = self.getWhitelistFilenames(reviewer_load_constraints_prefix)
#reviewer_load_constraint = self.loadReviewerLoadConstraints(reviewer_load_constraints_files, emails_to_reviewer_id_dict)
forced_reviewer_to_area = {}
whitelist_files = self.getWhitelistFilenames(whitelist_files_prefix)
area_to_whitelist, area_to_load, area_to_paper_load = self.loadWhitelists(whitelist_files, emails_to_reviewer_id_dict, forced_reviewer_to_area)
# normalize reviewers by unique keys based on email and username so we can match against whitelists
reviewers_per_area_lists = self.createAreaReviewerLists(reviewer_to_area_choices, area_to_whitelist, accept_all_reviewers=accept_all_reviewers)
# area_to_paper_load- the number of reviewers needed for each paper in each area
area_to_num_assignments_per_round = self.computeNumAreaAssignmentPerRound(area_to_load, area_to_num_papers, area_to_paper_load, priority_areas)
assignments, area_to_num_reviews_assigned = \
self.assignReviewers(reviewers_per_area_lists, reviewer_load_constraint, area_to_load, \
area_to_num_papers, area_to_num_assignments_per_round, area_to_paper_load, \
assign_all_whitelist_reviewers_to_area, forced_reviewer_to_area, min_reviewers_per_area)
self.computeReviewerStats(assignments, reviewer_to_area_choices)
self.printFinalAssignmentStats(output_filename_prefix, assignments, from_reviewer_id_dict, reviewer_load_constraint)
if __name__ == '__main__':
ACLAssignGreedyReviewers().run() | mdredze/automated_reviewer_assigner | python/acl_greedy_assign_reviewers.py | Python | bsd-2-clause | 24,923 |
"""Storage for pytest objects during test runs
The objects in the module will change during the course of a test run,
so they have been stashed into the 'store' namespace
Usage:
# imported directly (store is pytest.store)
from cfme.fixtures.pytest_store import store
store.config, store.pluginmanager, store.session
The availability of these objects varies during a test run, but
all should be available in the collection and testing phases of a test run.
"""
import fauxfactory
import os
import sys
from _pytest.terminal import TerminalReporter
from cached_property import cached_property
from py.io import TerminalWriter
from cfme.utils import diaper
class FlexibleTerminalReporter(TerminalReporter):
"""A TerminalReporter stand-in that pretends to work even without a py.test config."""
def __init__(self, config=None, file=None):
if config:
# If we have a config, nothing more needs to be done
return TerminalReporter.__init__(self, config, file)
# Without a config, pretend to be a TerminalReporter
# hook-related functions (logreport, collection, etc) will be outright broken,
# but the line writers should still be usable
if file is None:
file = sys.stdout
self._tw = self.writer = TerminalWriter(file)
self.hasmarkup = self._tw.hasmarkup
self.reportchars = ''
self.currentfspath = None
class Store(object):
"""pytest object store
If a property isn't available for any reason (including being accessed outside of a pytest run),
it will be None.
"""
@property
def current_appliance(self):
# lazy import due to circular imports (loops and loops and loops)
from cfme.utils import appliance
# TODO: conceive a better way to detect/log import-time misuse
# assert self.config is not None, 'current appliance not in scope'
return appliance.current_appliance
def __init__(self):
#: The py.test config instance, None if not in py.test
self.config = None
#: The current py.test session, None if not in a py.test session
self.session = None
#: Parallelizer role, None if not running a parallelized session
self.parallelizer_role = None
# Stash of the "real" terminal reporter once we get it,
# so we don't have to keep going through pluginmanager
self._terminalreporter = None
#: hack variable until we get a more sustainable solution
self.ssh_clients_to_close = []
self.uncollection_stats = {}
@property
def has_config(self):
return self.config is not None
def _maybe_get_plugin(self, name):
""" returns the plugin if the pluginmanager is availiable and the plugin exists"""
return self.pluginmanager and self.pluginmanager.getplugin(name)
@property
def in_pytest_session(self):
return self.session is not None
@property
def fixturemanager(self):
# "publicize" the fixturemanager
return self.session and self.session._fixturemanager
@property
def capturemanager(self):
return self._maybe_get_plugin('capturemanager')
@property
def pluginmanager(self):
# Expose this directly on the store for convenience in getting/setting plugins
return self.config and self.config.pluginmanager
@property
def terminalreporter(self):
if self._terminalreporter is not None:
return self._terminalreporter
reporter = self._maybe_get_plugin('terminalreporter')
if reporter and isinstance(reporter, TerminalReporter):
self._terminalreporter = reporter
return reporter
return FlexibleTerminalReporter(self.config)
@property
def terminaldistreporter(self):
return self._maybe_get_plugin('terminaldistreporter')
@property
def parallel_session(self):
return self._maybe_get_plugin('parallel_session')
@property
def slave_manager(self):
return self._maybe_get_plugin('slave_manager')
@property
def slaveid(self):
return getattr(self.slave_manager, 'slaveid', None)
@cached_property
def my_ip_address(self):
try:
# Check the environment first
return os.environ['CFME_MY_IP_ADDRESS']
except KeyError:
# Fall back to having an appliance tell us what it thinks our IP
# address is
return self.current_appliance.ssh_client.client_address()
def write_line(self, line, **kwargs):
return write_line(line, **kwargs)
store = Store()
def pytest_namespace():
# Expose the pytest store as pytest.store
return {'store': store}
def pytest_plugin_registered(manager):
# config will be set at the second call to this hook
if store.config is None:
store.config = manager.getplugin('pytestconfig')
def pytest_sessionstart(session):
store.session = session
def write_line(line, **kwargs):
"""A write-line helper that should *always* write a line to the terminal
It knows all of py.test's dirty tricks, including ones that we made, and works around them.
Args:
**kwargs: Normal kwargs for pytest line formatting, stripped from slave messages
"""
if store.slave_manager:
# We're a pytest slave! Write out the line through the slave manager
store.slave_manager.message(line, **kwargs)
else:
# If py.test is suppressing stdout/err, turn that off for a moment
with diaper:
store.capturemanager.suspendcapture()
# terminal reporter knows whether or not to write a newline based on currentfspath
# so stash it, then use rewrite to blow away the line that printed the current
# test name, then clear currentfspath so the test name is reprinted with the
# write_ensure_prefix call. shenanigans!
cfp = store.terminalreporter.currentfspath
# carriage return, write spaces for the whole line, carriage return, write the new line
store.terminalreporter.line('\r' + ' ' * store.terminalreporter._tw.fullwidth + '\r' + line,
**kwargs)
store.terminalreporter.currentfspath = fauxfactory.gen_alphanumeric(8)
store.terminalreporter.write_ensure_prefix(cfp)
# resume capturing
with diaper:
store.capturemanager.resumecapture()
| anurag03/integration_tests | cfme/fixtures/pytest_store.py | Python | gpl-2.0 | 6,454 |
"""
FormWizard class -- implements a multi-page form, validating between each
step and storing the form's state as HTML hidden fields so that no state is
stored on the server side.
"""
import cPickle as pickle
from django import forms
from django.conf import settings
from django.contrib.formtools.utils import security_hash, form_hmac
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.crypto import constant_time_compare
from django.utils.hashcompat import md5_constructor
from django.utils.translation import ugettext_lazy as _
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
class FormWizard(object):
# The HTML (and POST data) field name for the "step" variable.
step_field_name = "wizard_step"
# METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################
def __init__(self, form_list, initial=None):
"""
Start a new wizard with a list of forms.
form_list should be a list of Form classes (not instances).
"""
self.form_list = form_list[:]
self.initial = initial or {}
# Dictionary of extra template context variables.
self.extra_context = {}
# A zero-based counter keeping track of which step we're in.
self.step = 0
def __repr__(self):
return "step: %d\nform_list: %s\ninitial_data: %s" % (self.step, self.form_list, self.initial)
def get_form(self, step, data=None):
"Helper method that returns the Form instance for the given step."
return self.form_list[step](data, prefix=self.prefix_for_step(step), initial=self.initial.get(step, None))
def num_steps(self):
"Helper method that returns the number of steps."
# You might think we should just set "self.num_steps = len(form_list)"
# in __init__(), but this calculation needs to be dynamic, because some
# hook methods might alter self.form_list.
return len(self.form_list)
def _check_security_hash(self, token, request, form):
expected = self.security_hash(request, form)
if constant_time_compare(token, expected):
return True
else:
# Fall back to Django 1.2 method, for compatibility with forms that
# are in the middle of being used when the upgrade occurs. However,
# we don't want to do this fallback if a subclass has provided their
# own security_hash method - because they might have implemented a
# more secure method, and this would punch a hole in that.
# PendingDeprecationWarning <- left here to remind us that this
# compatibility fallback should be removed in Django 1.5
FormWizard_expected = FormWizard.security_hash(self, request, form)
if expected == FormWizard_expected:
# They didn't override security_hash, do the fallback:
old_expected = security_hash(request, form)
return constant_time_compare(token, old_expected)
else:
return False
@method_decorator(csrf_protect)
def __call__(self, request, *args, **kwargs):
"""
Main method that does all the hard work, conforming to the Django view
interface.
"""
if 'extra_context' in kwargs:
self.extra_context.update(kwargs['extra_context'])
current_step = self.determine_step(request, *args, **kwargs)
self.parse_params(request, *args, **kwargs)
# Sanity check.
if current_step >= self.num_steps():
raise Http404('Step %s does not exist' % current_step)
# Validate and process all the previous forms before instantiating the
# current step's form in case self.process_step makes changes to
# self.form_list.
# If any of them fails validation, that must mean the validator relied
# on some other input, such as an external Web site.
# It is also possible that validation might fail under certain attack
# situations: an attacker might be able to bypass previous stages, and
# generate correct security hashes for all the skipped stages by virtue
# of:
# 1) having filled out an identical form which doesn't have the
# validation (and does something different at the end),
# 2) or having filled out a previous version of the same form which
# had some validation missing,
# 3) or previously having filled out the form when they had more
# privileges than they do now.
#
# Since the hashes only take into account values, and not any other
# validation the form might do, we must re-do validation now for
# security reasons.
previous_form_list = []
for i in range(current_step):
f = self.get_form(i, request.POST)
if not self._check_security_hash(request.POST.get("hash_%d" % i, ''),
request, f):
return self.render_hash_failure(request, i)
if not f.is_valid():
return self.render_revalidation_failure(request, i, f)
else:
self.process_step(request, f, i)
previous_form_list.append(f)
# Process the current step. If it's valid, go to the next step or call
# done(), depending on whether any steps remain.
if request.method == 'POST':
form = self.get_form(current_step, request.POST)
else:
form = self.get_form(current_step)
if form.is_valid():
self.process_step(request, form, current_step)
next_step = current_step + 1
if next_step == self.num_steps():
return self.done(request, previous_form_list + [form])
else:
form = self.get_form(next_step)
self.step = current_step = next_step
return self.render(form, request, current_step)
def render(self, form, request, step, context=None):
"Renders the given Form object, returning an HttpResponse."
old_data = request.POST
prev_fields = []
if old_data:
hidden = forms.HiddenInput()
# Collect all data from previous steps and render it as HTML hidden fields.
for i in range(step):
old_form = self.get_form(i, old_data)
hash_name = 'hash_%s' % i
prev_fields.extend([bf.as_hidden() for bf in old_form])
prev_fields.append(hidden.render(hash_name, old_data.get(hash_name, self.security_hash(request, old_form))))
return self.render_template(request, form, ''.join(prev_fields), step, context)
# METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################
def prefix_for_step(self, step):
"Given the step, returns a Form prefix to use."
return str(step)
def render_hash_failure(self, request, step):
"""
Hook for rendering a template if a hash check failed.
step is the step that failed. Any previous step is guaranteed to be
valid.
This default implementation simply renders the form for the given step,
but subclasses may want to display an error message, etc.
"""
return self.render(self.get_form(step), request, step, context={'wizard_error': _('We apologize, but your form has expired. Please continue filling out the form from this page.')})
def render_revalidation_failure(self, request, step, form):
"""
Hook for rendering a template if final revalidation failed.
It is highly unlikely that this point would ever be reached, but see
the comment in __call__() for an explanation.
"""
return self.render(form, request, step)
def security_hash(self, request, form):
"""
Calculates the security hash for the given HttpRequest and Form instances.
Subclasses may want to take into account request-specific information,
such as the IP address.
"""
return form_hmac(form)
def determine_step(self, request, *args, **kwargs):
"""
Given the request object and whatever *args and **kwargs were passed to
__call__(), returns the current step (which is zero-based).
Note that the result should not be trusted. It may even be a completely
invalid number. It's not the job of this method to validate it.
"""
if not request.POST:
return 0
try:
step = int(request.POST.get(self.step_field_name, 0))
except ValueError:
return 0
return step
def parse_params(self, request, *args, **kwargs):
"""
Hook for setting some state, given the request object and whatever
*args and **kwargs were passed to __call__(), sets some state.
This is called at the beginning of __call__().
"""
pass
def get_template(self, step):
"""
Hook for specifying the name of the template to use for a given step.
Note that this can return a tuple of template names if you'd like to
use the template system's select_template() hook.
"""
return 'forms/wizard.html'
def render_template(self, request, form, previous_fields, step, context=None):
"""
Renders the template for the given step, returning an HttpResponse object.
Override this method if you want to add a custom context, return a
different MIME type, etc. If you only need to override the template
name, use get_template() instead.
The template will be rendered with the following context:
step_field -- The name of the hidden field containing the step.
step0 -- The current step (zero-based).
step -- The current step (one-based).
step_count -- The total number of steps.
form -- The Form instance for the current step (either empty
or with errors).
previous_fields -- A string representing every previous data field,
plus hashes for completed forms, all in the form of
hidden fields. Note that you'll need to run this
through the "safe" template filter, to prevent
auto-escaping, because it's raw HTML.
"""
context = context or {}
context.update(self.extra_context)
return render_to_response(self.get_template(step), dict(context,
step_field=self.step_field_name,
step0=step,
step=step + 1,
step_count=self.num_steps(),
form=form,
previous_fields=previous_fields
), context_instance=RequestContext(request))
def process_step(self, request, form, step):
"""
Hook for modifying the FormWizard's internal state, given a fully
validated Form object. The Form is guaranteed to have clean, valid
data.
This method should *not* modify any of that data. Rather, it might want
to set self.extra_context or dynamically alter self.form_list, based on
previously submitted forms.
Note that this method is called every time a page is rendered for *all*
submitted steps.
"""
pass
# METHODS SUBCLASSES MUST OVERRIDE ########################################
def done(self, request, form_list):
"""
Hook for doing something with the validated data. This is responsible
for the final processing.
form_list is a list of Form instances, each containing clean, valid
data.
"""
raise NotImplementedError("Your %s class has not defined a done() method, which is required." % self.__class__.__name__)
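# Illustrative subclass (a sketch only; ContactForm1, ContactForm2 and the
# redirect URL are hypothetical, not part of this module):
#
#   class ContactWizard(FormWizard):
#       def done(self, request, form_list):
#           # form_list holds one validated Form instance per step
#           return HttpResponseRedirect('/done/')
#
# The wizard instance is then used as a view, e.g.
# ContactWizard([ContactForm1, ContactForm2]) mapped to a URL in urls.py.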
| rimbalinux/MSISDNArea | django/contrib/formtools/wizard.py | Python | bsd-3-clause | 12,391 |
#!/usr/bin/env python
#
# Copyright 2004 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
# This module starts the atsc processing chain taking the captured
# off-air signal created with:
#
# usrp_rx_cfile.py -R <side with tuner, a or b>
# -d 10 set decimation to get signal at 6.4e6 rate
# -f <center of tv signal channel freq>
# -g <appropriate gain for best signal / noise>
#
# All this module does is multiply the sample rate by 3, from 6.4e6 to
# 19.2e6 complex samples / sec, then lowpass filter with a cutoff of 3.2MHz
# and a transition band width of .5MHz. Center of the tv channels is
# then at 0 with edges at -3.2MHz and 3.2MHz.
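#
# Example invocation (a sketch; the capture file path is hypothetical):
#
#   ./interp.py /tmp/usrp_capture.dat
#
# The filtered stream is written to /tmp/atsc_pipe_1 for the next stage in
# the ATSC processing chain.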
from gnuradio import gr
import sys
def graph (args):
nargs = len (args)
if nargs == 1:
infile = args[0]
else:
sys.stderr.write('usage: interp.py input_file\n')
sys.exit (1)
sampling_freq = 6400000
fg = gr.flow_graph ()
src0 = gr.file_source (gr.sizeof_gr_complex,infile)
src1 = gr.sig_source_c (sampling_freq, gr.GR_CONST_WAVE, 1, 0)
src2 = gr.sig_source_c (sampling_freq, gr.GR_CONST_WAVE, 1, 0)
interlv = gr.interleave(gr.sizeof_gr_complex)
lp_coeffs = gr.firdes.low_pass ( 3, 19.2e6, 3.2e6, .5e6, gr.firdes.WIN_HAMMING )
lp = gr.fir_filter_ccf ( 1, lp_coeffs )
file = gr.file_sink(gr.sizeof_gr_complex,"/tmp/atsc_pipe_1")
fg.connect( src0, (interlv, 0) )
fg.connect( src1, (interlv, 1) )
fg.connect( src2, (interlv, 2) )
fg.connect( interlv, lp, file )
fg.start()
raw_input ('Head End: Press Enter to stop')
fg.stop()
if __name__ == '__main__':
graph (sys.argv[1:])
| trnewman/VT-USRP-daughterboard-drivers_python | gr-atsc/src/python/interp.py | Python | gpl-3.0 | 2,405 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import diaper
import fauxfactory
import hashlib
import iso8601
import random
import re
import command
import yaml
from contextlib import closing
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django.core.mail import send_mail
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from celery import chain, chord, shared_task
from celery.exceptions import MaxRetriesExceededError
from datetime import datetime, timedelta
from functools import wraps
from lxml import etree
from novaclient.exceptions import OverLimit as OSOverLimit
from paramiko import SSHException
from urllib2 import urlopen, HTTPError
import socket
from appliances.models import (
Provider, Group, Template, Appliance, AppliancePool, DelayedProvisionTask,
MismatchVersionMailer, User, GroupShepherd)
from sprout import settings, redis
from sprout.irc_bot import send_message
from sprout.log import create_logger
from cfme.utils import conf
from cfme.utils.appliance import Appliance as CFMEAppliance
from cfme.utils.path import project_path
from cfme.utils.timeutil import parsetime
from cfme.utils.trackerbot import api, depaginate, parse_template
from cfme.utils.version import Version
from cfme.utils.wait import wait_for
LOCK_EXPIRE = 60 * 15 # 15 minutes
VERSION_REGEXPS = [
r"^cfme-(\d)(\d)(\d)(\d)(\d{2})", # 1.2.3.4.11
# newer format
r"cfme-(\d)(\d)(\d)[.](\d{2})-", # cfme-524.02- -> 5.2.4.2
r"cfme-(\d)(\d)(\d)[.](\d{2})[.](\d)-", # cfme-524.02.1- -> 5.2.4.2.1
# 4 digits
r"cfme-(?:nightly-)?(\d)(\d)(\d)(\d)-", # cfme-5242- -> 5.2.4.2
r"cfme-(\d)(\d)(\d)-(\d)-", # cfme-520-1- -> 5.2.0.1
# 5 digits (not very intelligent but no better solution so far)
r"cfme-(?:nightly-)?(\d)(\d)(\d)(\d{2})-", # cfme-53111- -> 5.3.1.11, cfme-53101 -> 5.3.1.1
]
VERSION_REGEXPS = map(re.compile, VERSION_REGEXPS)
VERSION_REGEXP_UPSTREAM = re.compile(r'^miq-stable-([^-]+)-')
TRACKERBOT_PAGINATE = 100
def retrieve_cfme_appliance_version(template_name):
"""If possible, retrieve the appliance's version from template's name."""
for regexp in VERSION_REGEXPS:
match = regexp.search(template_name)
if match is not None:
return ".".join(map(str, map(int, match.groups())))
else:
match = VERSION_REGEXP_UPSTREAM.search(template_name)
if match is not None:
return match.groups()[0]
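# Illustrative results (derived from the regular expressions above, not from a
# live template listing):
#
#   retrieve_cfme_appliance_version("cfme-524.02-abc") -> "5.2.4.2"
#   retrieve_cfme_appliance_version("miq-stable-fine-1-xyz") -> "fine"
#   retrieve_cfme_appliance_version("unrelated-template") -> None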
def trackerbot():
return api(trackerbot_url=settings.HUBBER_URL.rstrip('/') + '/api/')
def none_dict(l):
""""If the parameter passed is None, returns empty dict. Otherwise it passes through"""
if l is None:
return {}
else:
return l
def provider_error_logger():
return create_logger("provider_errors")
def logged_task(*args, **kwargs):
kwargs["bind"] = True
def f(task):
@wraps(task)
def wrapped_task(self, *args, **kwargs):
self.logger = create_logger(task)
try:
return task(self, *args, **kwargs)
except Exception as e:
self.logger.error(
"An exception occured when executing with args: %r kwargs: %r",
args, kwargs)
self.logger.exception(e)
raise
return shared_task(*args, **kwargs)(wrapped_task)
return f
def singleton_task(*args, **kwargs):
kwargs["bind"] = True
wait = kwargs.pop('wait', False)
wait_countdown = kwargs.pop('wait_countdown', 10)
wait_retries = kwargs.pop('wait_retries', 30)
def f(task):
@wraps(task)
def wrapped_task(self, *args, **kwargs):
self.logger = create_logger(task)
# Create hash of all args
digest_base = "/".join(str(arg) for arg in args)
keys = sorted(kwargs.keys())
digest_base += "//" + "/".join("{}={}".format(key, kwargs[key]) for key in keys)
digest = hashlib.sha256(digest_base).hexdigest()
lock_id = '{0}-lock-{1}'.format(self.name, digest)
if cache.add(lock_id, 'true', LOCK_EXPIRE):
try:
return task(self, *args, **kwargs)
except Exception as e:
self.logger.error(
"An exception occured when executing with args: %r kwargs: %r",
args, kwargs)
self.logger.exception(e)
raise
finally:
cache.delete(lock_id)
elif wait:
self.logger.info("Waiting for another instance of the task to end.")
self.retry(args=args, countdown=wait_countdown, max_retries=wait_retries)
return shared_task(*args, **kwargs)(wrapped_task)
return f
@singleton_task()
def kill_unused_appliances(self):
"""This is the watchdog, that guards the appliances that were given to users. If you forget
to prolong the lease time, this is the thing that will take the appliance off your hands
and kill it."""
with transaction.atomic():
for appliance in Appliance.objects.filter(marked_for_deletion=False):
if appliance.leased_until is not None and appliance.leased_until <= timezone.now():
self.logger.info("Watchdog found an appliance that is to be deleted: {}/{}".format(
appliance.id, appliance.name))
kill_appliance.delay(appliance.id)
@singleton_task()
def kill_appliance(self, appliance_id, replace_in_pool=False, minutes=60):
"""As-reliable-as-possible appliance deleter. Turns off, deletes the VM and deletes the object.
If the appliance was assigned to a pool and we want to replace it, redo the provisioning.
"""
self.logger.info("Initiated kill of appliance {}".format(appliance_id))
appliance = Appliance.objects.get(id=appliance_id)
workflow = [
disconnect_direct_lun.si(appliance_id),
appliance_power_off.si(appliance_id),
kill_appliance_delete.si(appliance_id),
]
if replace_in_pool:
if appliance.appliance_pool is not None:
workflow.append(
replace_clone_to_pool.si(
appliance.template.version, appliance.template.date,
appliance.appliance_pool.id, minutes, appliance.template.id))
workflow = chain(*workflow)
workflow()
@singleton_task()
def kill_appliance_delete(self, appliance_id, _delete_already_issued=False):
delete_issued = False
try:
appliance = Appliance.objects.get(id=appliance_id)
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
if appliance.provider_api.does_vm_exist(appliance.name):
appliance.set_status("Deleting the appliance from provider")
# If we haven't issued the delete order, do it now
if not _delete_already_issued:
appliance.provider_api.delete_vm(appliance.name)
delete_issued = True
# In any case, retry to wait for the VM to be deleted, but next time do not issue delete
self.retry(args=(appliance_id, True), countdown=5, max_retries=60)
appliance.delete()
except ObjectDoesNotExist:
# Appliance object already not there
return
except Exception as e:
try:
appliance.set_status("Could not delete appliance. Retrying.")
except UnboundLocalError:
return # The appliance is not there any more
# In case of error retry, and also specify whether the delete order was already issued
self.retry(
args=(appliance_id, _delete_already_issued or delete_issued),
exc=e, countdown=5, max_retries=60)
@singleton_task()
def poke_trackerbot(self):
"""This beat-scheduled task periodically polls the trackerbot if there are any new templates.
"""
template_usability = []
# Extract data from trackerbot
tbapi = trackerbot()
objects = depaginate(tbapi, tbapi.providertemplate().get(limit=TRACKERBOT_PAGINATE))["objects"]
per_group = {}
for obj in objects:
if obj["template"]["group"]["name"] == 'unknown':
continue
if obj["template"]["group"]["name"] not in per_group:
per_group[obj["template"]["group"]["name"]] = []
per_group[obj["template"]["group"]["name"]].append(obj)
# Sort them using the build date
for group in per_group.keys():
per_group[group] = sorted(
per_group[group],
reverse=True, key=lambda o: o["template"]["datestamp"])
objects = []
# And interleave the groups
while any(per_group.values()):
for key in per_group.keys():
if per_group[key]:
objects.append(per_group[key].pop(0))
for template in objects:
if template["provider"]["key"] not in conf.cfme_data.management_systems.keys():
# If we don't use that provider in yamls, set the template as not usable
# 1) It will prevent adding this template if not added
# 2) It'll mark the template as unusable if it already exists
template["usable"] = False
template_usability.append(
(
template["provider"]["key"],
template["template"]["name"],
template["usable"]
)
)
if not template["usable"]:
continue
group, create = Group.objects.get_or_create(id=template["template"]["group"]["name"])
# Check if the template is already obsolete
if group.template_obsolete_days is not None:
build_date = parsetime.from_iso_date(template["template"]["datestamp"])
if build_date <= (parsetime.today() - timedelta(days=group.template_obsolete_days)):
# It is already obsolete, so ignore it
continue
provider, create = Provider.objects.get_or_create(id=template["provider"]["key"])
if not provider.is_working:
continue
if "sprout" not in provider.provider_data:
continue
if not provider.provider_data.get("use_for_sprout", False):
continue
if not provider.provider_type:
provider.provider_type = provider.provider_data.get('type')
provider.save(update_fields=['provider_type'])
template_name = template["template"]["name"]
ga_released = template['template']['ga_released']
date = parse_template(template_name).datestamp
# nasty: trackerbot slightly corrupts the JSON data, so it gets parsed
# the wrong way as a result
custom_data = template['template'].get('custom_data', "{}")
processed_custom_data = custom_data.replace("u'", '"').replace("'", '"')
processed_custom_data = yaml.safe_load(processed_custom_data)
if not date:
# Not a CFME/MIQ template, ignore it.
continue
# Original one
original_template = None
try:
original_template = Template.objects.get(
provider=provider, template_group=group, original_name=template_name,
name=template_name, preconfigured=False)
if original_template.ga_released != ga_released:
original_template.ga_released = ga_released
original_template.save(update_fields=['ga_released'])
if provider.provider_type == 'openshift':
if original_template.custom_data != custom_data:
original_template.custom_data = processed_custom_data
original_template.template_type = Template.OPENSHIFT_POD
original_template.container = 'cloudforms-0'
original_template.save(update_fields=['custom_data',
'container',
'template_type'])
except ObjectDoesNotExist:
if template_name in provider.templates:
date = parse_template(template_name).datestamp
if date is None:
self.logger.warning(
"Ignoring template {} because it does not have a date!".format(
template_name))
continue
template_version = retrieve_cfme_appliance_version(template_name)
if template_version is None:
# Make up a faux version
# The first 3 fields of the version are parsed as a zstream,
# so the "..." prefix makes it a "nil" stream
template_version = "...{}".format(date.strftime("%Y%m%d"))
with transaction.atomic():
tpl = Template(
provider=provider, template_group=group, original_name=template_name,
name=template_name, preconfigured=False, date=date, ready=True, exists=True,
usable=True, version=template_version)
tpl.save()
if provider.provider_type == 'openshift':
tpl.custom_data = processed_custom_data
tpl.container = 'cloudforms-0'
tpl.template_type = Template.OPENSHIFT_POD
tpl.save(update_fields=['container', 'template_type', 'custom_data'])
original_template = tpl
self.logger.info("Created a new template #{}".format(tpl.id))
# If the provider is set to not preconfigure templates, do not bother even doing it.
if provider.num_simultaneous_configuring > 0 and provider.provider_type != 'openshift':
# Preconfigured one
try:
preconfigured_template = Template.objects.get(
provider=provider, template_group=group, original_name=template_name,
preconfigured=True)
if preconfigured_template.ga_released != ga_released:
preconfigured_template.ga_released = ga_released
preconfigured_template.save(update_fields=['ga_released'])
except ObjectDoesNotExist:
if template_name in provider.templates:
original_id = original_template.id if original_template is not None else None
create_appliance_template.delay(
provider.id, group.id, template_name, source_template_id=original_id)
# If any of the templates becomes unusable, let sprout know about it
# Similarly, if some of them become usable ...
for provider_id, template_name, usability in template_usability:
provider, create = Provider.objects.get_or_create(id=provider_id)
if not provider.working or provider.disabled:
continue
with transaction.atomic():
for template in Template.objects.filter(provider=provider, original_name=template_name):
template.usable = usability
template.save(update_fields=['usable'])
# Kill all shepherd appliances if they were accidentally spun up
if not usability:
for appliance in Appliance.objects.filter(
template=template, marked_for_deletion=False,
appliance_pool=None):
self.logger.info(
'Killing an appliance {}/{} because its template was marked as unusable'
.format(appliance.id, appliance.name))
Appliance.kill(appliance)
@logged_task()
def create_appliance_template(self, provider_id, group_id, template_name, source_template_id=None):
"""This task creates a template from a fresh CFME template. In case of fatal error during the
operation, the template object is deleted to make sure the operation will be retried next time
when poke_trackerbot runs."""
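# Illustrative dispatch, mirroring the call made from poke_trackerbot above:
#   create_appliance_template.delay(provider.id, group.id, template_name,
#                                   source_template_id=original_template.id)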
provider = Provider.objects.get(id=provider_id, working=True, disabled=False)
provider.cleanup() # Precaution
group = Group.objects.get(id=group_id)
with transaction.atomic():
# Limit the number of concurrent template configurations
if provider.remaining_configuring_slots == 0:
return False # It will be kicked again when trackerbot gets poked
try:
Template.objects.get(
template_group=group, provider=provider, original_name=template_name,
preconfigured=True)
return False
except ObjectDoesNotExist:
pass
# Fire off the template preparation
date = parse_template(template_name).datestamp
if not date:
return
template_version = retrieve_cfme_appliance_version(template_name)
if template_version is None:
# Make up a faux version
# The first 3 fields of the version are parsed as a zstream,
# so the "..." prefix makes it a "nil" stream
template_version = "...{}".format(date.strftime("%Y%m%d"))
new_template_name = settings.TEMPLATE_FORMAT.format(
group=group.id, date=date.strftime("%y%m%d"), rnd=fauxfactory.gen_alphanumeric(8))
if provider.template_name_length is not None:
allowed_length = provider.template_name_length
# There is some limit
if len(new_template_name) > allowed_length:
# Cut it down
randoms_length = len(new_template_name.rsplit("_", 1)[-1])
minimum_length = (len(new_template_name) - randoms_length) + 1 # At least one random char must remain
if minimum_length <= allowed_length:
# Just cut it
new_template_name = new_template_name[:allowed_length]
else:
# Another solution
new_template_name = settings.TEMPLATE_FORMAT.format(
group=group.id[:2], date=date.strftime("%y%m%d"), # Use only first 2 of grp
rnd=fauxfactory.gen_alphanumeric(2)) # And just 2 chars random
# TODO: If anything larger comes, do fix that!
if source_template_id is not None:
try:
source_template = Template.objects.get(id=source_template_id)
except ObjectDoesNotExist:
source_template = None
else:
source_template = None
template = Template(
provider=provider, template_group=group, name=new_template_name, date=date,
version=template_version, original_name=template_name, parent_template=source_template)
template.save()
workflow = chain(
prepare_template_deploy.si(template.id),
prepare_template_verify_version.si(template.id),
prepare_template_configure.si(template.id),
prepare_template_seal.si(template.id),
prepare_template_poweroff.si(template.id),
prepare_template_finish.si(template.id),
)
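# Celery notes on the workflow above: chain() runs the prepare_template_* tasks sequentially;
# .si() builds immutable signatures, so a task's return value is not passed on to the next
# task, and link_error() below registers a cleanup task that removes the half-finished
# template if any step of the chain fails.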
workflow.link_error(prepare_template_delete_on_error.si(template.id))
workflow()
@singleton_task()
def prepare_template_deploy(self, template_id):
template = Template.objects.get(id=template_id)
try:
if not template.provider.is_working:
raise RuntimeError('Provider is not working.')
if not template.exists_in_provider:
template.set_status("Deploying the template.")
provider_data = template.provider.provider_data
kwargs = provider_data["sprout"]
kwargs["power_on"] = True
if "allowed_datastores" not in kwargs and "allowed_datastores" in provider_data:
kwargs["allowed_datastores"] = provider_data["allowed_datastores"]
self.logger.info("Deployment kwargs: {}".format(repr(kwargs)))
template.provider_api.deploy_template(
template.original_name, vm_name=template.name, **kwargs)
else:
template.set_status("Waiting for deployment to be finished.")
template.provider_api.wait_vm_running(template.name)
except Exception as e:
template.set_status(
"Could not properly deploy the template. Retrying. {}: {}".format(
type(e).__name__, str(e)))
self.logger.exception(e)
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template deployed.")
@singleton_task()
def prepare_template_verify_version(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Verifying version.")
appliance = CFMEAppliance.from_provider(
template.provider_name, template.name, container=template.container)
appliance.ipapp.wait_for_ssh()
try:
true_version = appliance.version
except Exception as e:
template.set_status("Some SSH error happened during appliance version check.")
self.retry(args=(template_id,), exc=e, countdown=20, max_retries=5)
supposed_version = Version(template.version)
if true_version is None or true_version.vstring == 'master':
return
if true_version != supposed_version:
# Check if the difference is not just in the suffixes, which can be the case ...
t = str(true_version)
s = str(supposed_version)
if supposed_version.version == true_version.version or t.startswith(s):
# The two have the same version but different suffixes, so apply the suffix to the template
# object. This also covers the case where the supposed version is incomplete, in which case
# we use the detected version.
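# Illustrative case (hypothetical values): a detected version '5.9.0.2-beta1' starts with the
# supposed version '5.9.0.2', so template.version is updated to the detected string.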
with transaction.atomic():
template.version = t
template.save(update_fields=['version'])
if template.parent_template is not None:
# In case we have a parent template, update the version there too.
if template.version != template.parent_template.version:
pt = template.parent_template
pt.version = template.version
pt.save(update_fields=['version'])
return # no need to continue with spamming process
# SPAM SPAM SPAM!
with transaction.atomic():
mismatch_in_db = MismatchVersionMailer.objects.filter(
provider=template.provider,
template_name=template.original_name,
supposed_version=supposed_version,
actual_version=true_version)
if not mismatch_in_db:
mismatch = MismatchVersionMailer(
provider=template.provider,
template_name=template.original_name,
supposed_version=supposed_version,
actual_version=true_version)
mismatch.save()
# Run the task to mail the problem
mailer_version_mismatch.delay()
raise Exception("Detected version mismatch!")
@singleton_task()
def prepare_template_configure(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Customization started.")
appliance = CFMEAppliance.from_provider(
template.provider_name, template.name, container=template.container)
try:
appliance.configure(
setup_fleece=False,
log_callback=lambda s: template.set_status("Customization progress: {}".format(s)),
on_openstack=template.provider.provider_data.get('type', None) == 'openstack')
except Exception as e:
template.set_status("Could not properly configure the CFME. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template configuration was done.")
@singleton_task()
def prepare_template_seal(self, template_id):
template = Template.objects.get(id=template_id)
template.set_status("Sealing template.")
try:
template.cfme.ipapp.seal_for_templatizing()
except Exception as e:
template.set_status("Could not seal the template. Retrying.")
self.retry(
args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template sealed.")
@singleton_task()
def prepare_template_poweroff(self, template_id):
template = Template.objects.get(id=template_id)
try:
if not template.provider.is_working:
raise RuntimeError('Provider is not working.')
template.set_status("Powering off")
template.provider_api.stop_vm(template.name)
template.provider_api.wait_vm_stopped(template.name)
except Exception as e:
template.set_status("Could not power off the appliance. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Powered off.")
@singleton_task()
def prepare_template_finish(self, template_id):
template = Template.objects.get(id=template_id)
try:
if not template.provider.is_working:
raise RuntimeError('Provider is not working.')
template.set_status("Finishing template creation.")
if template.temporary_name is None:
tmp_name = "templatize_{}".format(fauxfactory.gen_alphanumeric(8))
Template.objects.get(id=template_id).temporary_name = tmp_name # metadata, autosave
else:
tmp_name = template.temporary_name
template.provider_api.mark_as_template(
template.name, temporary_name=tmp_name, delete_on_error=False)
with transaction.atomic():
template = Template.objects.get(id=template_id)
template.ready = True
template.exists = True
template.save(update_fields=['ready', 'exists'])
del template.temporary_name
except Exception as e:
template.set_status("Could not mark the appliance as template. Retrying.")
self.retry(args=(template_id,), exc=e, countdown=10, max_retries=5)
else:
template.set_status("Template preparation finished.")
@singleton_task()
def prepare_template_delete_on_error(self, template_id):
try:
template = Template.objects.get(id=template_id)
except ObjectDoesNotExist:
return True
try:
if not template.provider.is_working:
raise RuntimeError('Provider is not working.')
template.set_status("Template creation failed. Deleting it.")
if template.provider_api.does_vm_exist(template.name):
template.provider_api.delete_vm(template.name)
wait_for(template.provider_api.does_vm_exist, [template.name], timeout='5m', delay=10)
if template.provider_api.does_template_exist(template.name):
template.provider_api.delete_template(template.name)
wait_for(
template.provider_api.does_template_exist, [template.name], timeout='5m', delay=10)
if (template.temporary_name is not None and
template.provider_api.does_template_exist(template.temporary_name)):
template.provider_api.delete_template(template.temporary_name)
wait_for(
template.provider_api.does_template_exist,
[template.temporary_name], timeout='5m', delay=10)
template.delete()
except Exception as e:
self.retry(args=(template_id,), exc=e, countdown=60, max_retries=5)
@logged_task()
def request_appliance_pool(self, appliance_pool_id, time_minutes):
"""This task gives maximum possible amount of spinned-up appliances to the specified pool and
then if there is need to spin up another appliances, it spins them up via clone_template_to_pool
task."""
self.logger.info(
"Appliance pool {} requested for {} minutes.".format(appliance_pool_id, time_minutes))
pool = AppliancePool.objects.get(id=appliance_pool_id)
n = Appliance.give_to_pool(pool)
for i in range(pool.total_count - n):
tpls = pool.possible_provisioning_templates
if tpls:
template_id = tpls[0].id
clone_template_to_pool(template_id, pool.id, time_minutes)
else:
with transaction.atomic():
task = DelayedProvisionTask(pool=pool, lease_time=time_minutes)
task.save()
apply_lease_times_after_pool_fulfilled.delay(appliance_pool_id, time_minutes)
@singleton_task()
def apply_lease_times_after_pool_fulfilled(self, appliance_pool_id, time_minutes):
pool = AppliancePool.objects.get(id=appliance_pool_id)
if pool.fulfilled:
for appliance in pool.appliances:
apply_lease_times.delay(appliance.id, time_minutes)
rename_appliances_for_pool.delay(pool.id)
with transaction.atomic():
pool.finished = True
pool.save(update_fields=['finished'])
else:
# Look whether we can swap any provisioning appliance with some in shepherd
unfinished = list(
Appliance.objects.filter(
appliance_pool=pool, ready=False, marked_for_deletion=False).all())
random.shuffle(unfinished)
if len(unfinished) > 0:
n = Appliance.give_to_pool(pool, len(unfinished))
with transaction.atomic():
for _ in range(n):
appl = unfinished.pop()
appl.appliance_pool = None
appl.save(update_fields=['appliance_pool'])
try:
self.retry(args=(appliance_pool_id, time_minutes), countdown=30, max_retries=120)
except MaxRetriesExceededError: # Bad luck, pool fulfillment failed. So destroy it.
pool.logger.error("Waiting for fulfillment failed. Initiating the destruction process.")
pool.kill()
@singleton_task()
def process_delayed_provision_tasks(self):
"""This picks up the provisioning tasks that were delayed due to ocncurrency limit of provision.
Goes one task by one and when some of them can be provisioned, it starts the provisioning and
then deletes the task.
"""
for task in DelayedProvisionTask.objects.order_by("id"):
if task.pool.not_needed_anymore:
task.delete()
continue
# Try retrieve from shepherd
appliances_given = Appliance.give_to_pool(task.pool, 1)
if appliances_given == 0:
# No free appliance in shepherd, so do it on our own
tpls = task.pool.possible_provisioning_templates
if task.provider_to_avoid is not None:
filtered_tpls = filter(lambda tpl: tpl.provider != task.provider_to_avoid, tpls)
if filtered_tpls:
# There are other providers to provision on, so try one of them
tpls = filtered_tpls
# If there is no other provider to provision on, we will use the original list.
# This will cause additional rejects until the provider quota is met
if tpls:
clone_template_to_pool(tpls[0].id, task.pool.id, task.lease_time)
task.delete()
else:
# Try freeing up some space in provider
for provider in task.pool.possible_providers:
appliances = provider.free_shepherd_appliances.exclude(
**task.pool.appliance_filter_params)
if appliances:
appl = random.choice(appliances)
self.logger.info(
'Freeing some space in provider by killing appliance {}/{}'
.format(appl.id, appl.name))
Appliance.kill(appl)
break # Just one
else:
# There was a free appliance in the shepherd, so we took it and don't need this task anymore
task.delete()
@logged_task()
def replace_clone_to_pool(
self, version, date, appliance_pool_id, time_minutes, exclude_template_id):
appliance_pool = AppliancePool.objects.get(id=appliance_pool_id)
if appliance_pool.not_needed_anymore:
return
exclude_template = Template.objects.get(id=exclude_template_id)
templates = appliance_pool.possible_templates
templates_excluded = filter(lambda tpl: tpl != exclude_template, templates)
if templates_excluded:
template = random.choice(templates_excluded)
else:
template = exclude_template # :( no other template to use
clone_template_to_pool(template.id, appliance_pool_id, time_minutes)
def clone_template_to_pool(template_id, appliance_pool_id, time_minutes):
template = Template.objects.get(id=template_id)
if template.template_type != Template.OPENSHIFT_POD:
appliance_format = settings.APPLIANCE_FORMAT
else:
appliance_format = settings.OPENSHIFT_APPLIANCE_FORMAT
new_appliance_name = appliance_format.format(
group=template.template_group.id,
date=template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8).lower())
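# Illustrative result only: the name is built from the group id, the template date and a
# random suffix; its exact shape depends on settings.APPLIANCE_FORMAT (or
# settings.OPENSHIFT_APPLIANCE_FORMAT), which is defined outside this module.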
with transaction.atomic():
pool = AppliancePool.objects.get(id=appliance_pool_id)
if pool.not_needed_anymore:
return
# Also prefix the name with the owner's username
new_appliance_name = "{}_{}".format(pool.owner.username, new_appliance_name)
if template.template_type == Template.OPENSHIFT_POD:
# openshift doesn't allow underscores to be used in project names
new_appliance_name = new_appliance_name.replace('_', '-')
appliance = Appliance(template=template, name=new_appliance_name, appliance_pool=pool)
appliance.save()
# Set pool to these params to keep the appliances with same versions/dates
pool.version = template.version
pool.date = template.date
pool.save(update_fields=['version', 'date'])
clone_template_to_appliance.delay(appliance.id, time_minutes, pool.yum_update)
@logged_task()
def apply_lease_times(self, appliance_id, time_minutes):
self.logger.info(
"Applying lease time {} minutes on appliance {}".format(time_minutes, appliance_id))
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.datetime_leased = timezone.now()
appliance.leased_until = appliance.datetime_leased + timedelta(minutes=int(time_minutes))
appliance.save(update_fields=['datetime_leased', 'leased_until'])
@logged_task()
def clone_template(self, template_id):
self.logger.info("Cloning template {}".format(template_id))
template = Template.objects.get(id=template_id)
new_appliance_name = settings.APPLIANCE_FORMAT.format(
group=template.template_group.id,
date=template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8))
appliance = Appliance(template=template, name=new_appliance_name)
appliance.save()
clone_template_to_appliance.delay(appliance.id)
@singleton_task()
def clone_template_to_appliance(self, appliance_id, lease_time_minutes=None, yum_update=False):
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_status("Beginning deployment process")
tasks = [
clone_template_to_appliance__clone_template.si(appliance_id, lease_time_minutes),
clone_template_to_appliance__wait_present.si(appliance_id),
appliance_power_on.si(appliance_id),
]
if yum_update:
tasks.append(appliance_yum_update.si(appliance_id))
tasks.append(appliance_reboot.si(appliance_id, if_needs_restarting=True))
if appliance.preconfigured:
tasks.append(wait_appliance_ready.si(appliance_id))
else:
tasks.append(mark_appliance_ready.si(appliance_id))
workflow = chain(*tasks)
if Appliance.objects.get(id=appliance_id).appliance_pool is not None:
# Case of the appliance pool
if Appliance.objects.get(id=appliance_id).appliance_pool.not_needed_anymore:
return
# TODO: Make replace_in_pool work again
workflow.link_error(
kill_appliance.si(appliance_id, replace_in_pool=False, minutes=lease_time_minutes))
else:
# Case of shepherd
workflow.link_error(kill_appliance.si(appliance_id))
workflow()
@singleton_task()
def clone_template_to_appliance__clone_template(self, appliance_id, lease_time_minutes):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present, terminating the chain
self.request.callbacks[:] = []
return
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
try:
appliance.provider.cleanup()
if not appliance.provider_api.does_vm_exist(appliance.name):
appliance.set_status("Beginning template clone.")
provider_data = appliance.template.provider.provider_data
kwargs = provider_data["sprout"]
kwargs["power_on"] = False
if "allowed_datastores" not in kwargs and "allowed_datastores" in provider_data:
kwargs["allowed_datastores"] = provider_data["allowed_datastores"]
if appliance.appliance_pool is not None:
if appliance.appliance_pool.override_memory is not None:
kwargs['ram'] = appliance.appliance_pool.override_memory
if appliance.appliance_pool.override_cpu is not None:
kwargs['cpu'] = appliance.appliance_pool.override_cpu
if appliance.is_openshift and appliance.template.custom_data:
kwargs['tags'] = appliance.template.custom_data.get('TAGS')
vm_data = appliance.provider_api.deploy_template(appliance.template.name,
vm_name=appliance.name, progress_callback=lambda progress:
appliance.set_status("Deploy progress: {}".format(progress)), **kwargs)
if appliance.is_openshift:
with transaction.atomic():
appliance.openshift_ext_ip = vm_data['external_ip']
appliance.openshift_project = vm_data['project']
appliance.ip_address = vm_data['url']
appliance.save(update_fields=['openshift_ext_ip',
'openshift_project',
'ip_address'])
except Exception as e:
messages = {"limit", "cannot add", "quota"}
if isinstance(e, OSOverLimit):
appliance.set_status("Hit OpenStack provisioning quota, trying putting it aside ...")
elif any(message in str(e).lower() for message in messages):
appliance.set_status("Provider has some troubles, putting it aside ... {}/{}".format(
type(e).__name__, str(e)
))
provider_error_logger().exception(e)
else:
# Something got screwed really bad
appliance.set_status("Error happened: {}({})".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, lease_time_minutes), exc=e, countdown=60, max_retries=5)
# Ignore that and provision it somewhere else
if appliance.appliance_pool:
# We can put it aside for a while; the DelayedProvisionTask created below will retry it later
self.request.callbacks[:] = [] # Quit this chain
pool = appliance.appliance_pool
try:
if appliance.provider_api.does_vm_exist(appliance.name):
# Better to check it; you never know when that fails
appliance.provider_api.delete_vm(appliance.name)
wait_for(
appliance.provider_api.does_vm_exist,
[appliance.name], timeout='5m', delay=10)
except:
pass # Diaper here
appliance.delete(do_not_touch_ap=True)
with transaction.atomic():
new_task = DelayedProvisionTask(
pool=pool, lease_time=lease_time_minutes,
provider_to_avoid=appliance.template.provider)
new_task.save()
return
else:
# We cannot put it aside, so just try that again
self.retry(args=(appliance_id, lease_time_minutes), exc=e, countdown=60, max_retries=5)
else:
appliance.set_status("Template cloning finished. Refreshing provider VMs to get UUID.")
refresh_appliances_provider.delay(appliance.provider.id)
@singleton_task()
def clone_template_to_appliance__wait_present(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present, terminating the chain
self.request.callbacks[:] = []
return
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
try:
appliance.set_status("Waiting for the appliance to become visible in provider.")
if not appliance.provider_api.does_vm_exist(appliance.name):
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=30)
else:
appliance.set_status("Template was successfully cloned.")
with diaper:
appliance.synchronize_metadata()
@singleton_task()
def mark_appliance_ready(self, appliance_id):
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = True
appliance.save(update_fields=['ready'])
Appliance.objects.get(id=appliance_id).set_status("Appliance was marked as ready")
@singleton_task()
def appliance_power_on(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
if appliance.provider_api.is_vm_running(appliance.name):
try:
current_ip = appliance.provider_api.current_ip_address(appliance.name)
except Exception:
current_ip = None
if current_ip is not None:
# IP present
Appliance.objects.get(id=appliance_id).set_status("Appliance was powered on.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
if not appliance.is_openshift:
appliance.ip_address = current_ip
appliance.set_power_state(Appliance.Power.ON)
appliance.save()
if appliance.containerized and not appliance.is_openshift:
with appliance.ipapp.ssh_client as ssh:
# Fire up the container
ssh.run_command('cfme-start', ensure_host=True)
# VM is running now.
sync_appliance_hw.delay(appliance.id)
sync_provider_hw.delay(appliance.template.provider.id)
# fixes time synchronization
if not appliance.is_openshift:
appliance.ipapp.fix_ntp_clock()
return
else:
# IP not present yet
Appliance.objects.get(id=appliance_id).set_status("Appliance waiting for IP.")
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
elif not appliance.provider_api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
appliance.provider_api.vm_status(appliance.name)))
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
else:
appliance.set_status("Powering on.")
appliance.provider_api.start_vm(appliance.name)
self.retry(args=(appliance_id, ), countdown=20, max_retries=40)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, ), exc=e, countdown=20, max_retries=30)
@singleton_task()
def appliance_reboot(self, appliance_id, if_needs_restarting=False):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if if_needs_restarting:
with appliance.ssh as ssh:
if int(ssh.run_command("needs-restarting | wc -l").output.strip()) == 0:
return # No reboot needed
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.REBOOTING)
appliance.save()
appliance.ipapp.reboot(wait_for_web_ui=False, log_callback=appliance.set_status)
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.ON)
appliance.save()
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id, ), exc=e, countdown=20, max_retries=30)
@singleton_task()
def appliance_power_off(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
api = appliance.provider_api
if not api.does_vm_exist(appliance.name) or api.is_vm_stopped(appliance.name):
Appliance.objects.get(id=appliance_id).set_status("Appliance was powered off.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.OFF)
appliance.ready = False
appliance.save()
sync_provider_hw.delay(appliance.template.provider.id)
return
elif api.is_vm_suspended(appliance.name):
appliance.set_status("Starting appliance from suspended state to properly off it.")
api.start_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
elif not api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
api.vm_status(appliance.name)))
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
else:
appliance.set_status("Powering off.")
api.stop_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=40)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=40)
@singleton_task()
def appliance_suspend(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# source objects are not present
return
try:
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
if appliance.provider_api.is_vm_suspended(appliance.name):
Appliance.objects.get(id=appliance_id).set_status("Appliance was suspended.")
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.set_power_state(Appliance.Power.SUSPENDED)
appliance.ready = False
appliance.save()
sync_provider_hw.delay(appliance.template.provider.id)
return
elif not appliance.provider_api.in_steady_state(appliance.name):
appliance.set_status("Waiting for appliance to be steady (current state: {}).".format(
appliance.provider_api.vm_status(appliance.name)))
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
else:
appliance.set_status("Suspending.")
appliance.provider_api.suspend_vm(appliance.name)
self.retry(args=(appliance_id,), countdown=20, max_retries=30)
except Exception as e:
provider_error_logger().error("Exception {}: {}".format(type(e).__name__, str(e)))
self.retry(args=(appliance_id,), exc=e, countdown=20, max_retries=30)
@singleton_task()
def retrieve_appliance_ip(self, appliance_id):
"""Updates appliance's IP address."""
try:
appliance = Appliance.objects.get(id=appliance_id)
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
appliance.set_status("Retrieving IP address.")
ip_address = appliance.provider_api.current_ip_address(appliance.name)
if ip_address is None:
self.retry(args=(appliance_id,), countdown=30, max_retries=20)
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ip_address = ip_address
appliance.save(update_fields=['ip_address'])
except ObjectDoesNotExist:
# source object is not present, terminating
return
else:
appliance.set_status("IP address retrieved.")
@singleton_task()
def refresh_appliances(self):
"""Dispatches the appliance refresh process among the providers"""
self.logger.info("Initiating regular appliance provider refresh")
for provider in Provider.objects.filter(working=True, disabled=False):
refresh_appliances_provider.delay(provider.id)
@singleton_task(soft_time_limit=180)
def refresh_appliances_provider(self, provider_id):
"""Downloads the list of VMs from the provider, then matches them by name or UUID with
appliances stored in database.
"""
self.logger.info("Refreshing appliances in {}".format(provider_id))
provider = Provider.objects.get(id=provider_id, working=True, disabled=False)
if not hasattr(provider.api, "all_vms"):
# Ignore this provider
return
vms = provider.api.all_vms()
dict_vms = {}
uuid_vms = {}
for vm in vms:
dict_vms[vm.name] = vm
if vm.uuid:
uuid_vms[vm.uuid] = vm
for appliance in Appliance.objects.filter(template__provider=provider):
if appliance.uuid is not None and appliance.uuid in uuid_vms:
vm = uuid_vms[appliance.uuid]
# Matched by UUID; update the name in case it changed
appliance.name = vm.name
appliance.ip_address = vm.ip
appliance.set_power_state(Appliance.POWER_STATES_MAPPING.get(
vm.power_state, Appliance.Power.UNKNOWN))
appliance.save()
elif appliance.name in dict_vms:
vm = dict_vms[appliance.name]
# Matched by name; retrieve and store the UUID
appliance.uuid = vm.uuid
appliance.ip_address = vm.ip
appliance.set_power_state(Appliance.POWER_STATES_MAPPING.get(
vm.power_state, Appliance.Power.UNKNOWN))
appliance.save()
self.logger.info("Retrieved UUID for appliance {}/{}: {}".format(
appliance.id, appliance.name, appliance.uuid))
else:
# Orphaned :(
appliance.set_power_state(Appliance.Power.ORPHANED)
appliance.save()
@singleton_task()
def check_templates(self):
self.logger.info("Initiated a periodic template check")
for provider in Provider.objects.filter(disabled=False):
check_templates_in_provider.delay(provider.id)
@singleton_task(soft_time_limit=180)
def check_templates_in_provider(self, provider_id):
self.logger.info("Initiated a periodic template check for {}".format(provider_id))
provider = Provider.objects.get(id=provider_id, disabled=False)
# Get templates and update metadata
try:
templates = map(str, provider.api.list_template())
except Exception as err:
self.logger.warning("Provider will be marked as not working because of %s", err)
provider.working = False
provider.save(update_fields=['working'])
self.retry(args=(provider_id,), countdown=15, max_retries=3)
else:
provider.working = True
provider.save(update_fields=['working'])
with provider.edit_metadata as metadata:
metadata["templates"] = templates
if not provider.working:
return
# Check Sprout template existence
# expiration_time = (timezone.now() - timedelta(**settings.BROKEN_APPLIANCE_GRACE_TIME))
for template in Template.objects.filter(provider=provider):
with transaction.atomic():
tpl = Template.objects.get(pk=template.pk)
exists = tpl.name in templates
tpl.exists = exists
tpl.save(update_fields=['exists'])
# if not exists:
# if len(Appliance.objects.filter(template=template).all()) == 0\
# and template.status_changed < expiration_time:
# # No other appliance is made from this template so no need to keep it
# with transaction.atomic():
# tpl = Template.objects.get(pk=template.pk)
# tpl.delete()
@singleton_task()
def delete_nonexistent_appliances(self):
"""Goes through orphaned appliances' objects and deletes them from the database."""
expiration_time = (timezone.now() - timedelta(**settings.ORPHANED_APPLIANCE_GRACE_TIME))
for appliance in Appliance.objects.filter(ready=True).all():
if appliance.name in redis.renaming_appliances:
continue
if appliance.power_state == Appliance.Power.ORPHANED:
if appliance.power_state_changed > expiration_time:
# Ignore it for now
continue
self.logger.info(
"I will delete orphaned appliance {}/{}".format(appliance.id, appliance.name))
try:
appliance.delete()
except ObjectDoesNotExist as e:
if "AppliancePool" in str(e):
# Someone managed to delete the appliance pool before
appliance.appliance_pool = None
appliance.save(update_fields=['appliance_pool'])
appliance.delete()
else:
raise # No diaper pattern here!
# If something happened to the appliance provisioning process, just delete it to remove
# the garbage. It will be spun up again by the shepherd.
# Grace time is specified in BROKEN_APPLIANCE_GRACE_TIME
expiration_time = (timezone.now() - timedelta(**settings.BROKEN_APPLIANCE_GRACE_TIME))
for appliance in Appliance.objects.filter(ready=False, marked_for_deletion=False).all():
if appliance.status_changed < expiration_time:
self.logger.info("Killing broken appliance {}/{}".format(appliance.id, appliance.name))
Appliance.kill(appliance) # Use kill because the appliance may still exist
# And now - if something happened during appliance deletion, call kill again
for appliance in Appliance.objects.filter(
marked_for_deletion=True, status_changed__lt=expiration_time).all():
self.logger.info(
"Trying to kill unkilled appliance {}/{}".format(appliance.id, appliance.name))
Appliance.kill(appliance, force_delete=True)
def generic_shepherd(self, preconfigured):
"""This task takes care of having the required templates spinned into required number of
appliances. For each template group, it keeps the last template's appliances spinned up in
required quantity. If new template comes out of the door, it automatically kills the older
running template's appliances and spins up new ones. Sorts the groups by the fulfillment."""
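# Sketch of the keep/kill decision below: for downstream groups, the newest version and its
# newest build date are kept (filter_keep) while every older date and version goes into
# filters_kill; for upstream groups, only the newest date is kept and older dates are killed.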
for gs in sorted(
GroupShepherd.objects.all(), key=lambda g: g.get_fulfillment_percentage(preconfigured)):
prov_filter = {'provider__user_groups': gs.user_group}
group_versions = Template.get_versions(
template_group=gs.template_group, ready=True, usable=True, preconfigured=preconfigured,
container=None, **prov_filter)
group_dates = Template.get_dates(
template_group=gs.template_group, ready=True, usable=True, preconfigured=preconfigured,
container=None, **prov_filter)
if group_versions:
# Downstream - by version (downstream releases)
version = group_versions[0]
# Find the latest date (one version can have new build)
dates = Template.get_dates(
template_group=gs.template_group, ready=True, usable=True,
version=group_versions[0], preconfigured=preconfigured, container=None,
**prov_filter)
if not dates:
# No template yet?
continue
date = dates[0]
filter_keep = {"version": version, "date": date, 'container': None}
filters_kill = []
for kill_date in dates[1:]:
filters_kill.append({"version": version, "date": kill_date})
for kill_version in group_versions[1:]:
filters_kill.append({"version": kill_version})
elif group_dates:
# Upstream - by date (upstream nightlies)
filter_keep = {"date": group_dates[0], 'container': None}
filters_kill = [{"date": v} for v in group_dates[1:]]
else:
continue # Ignore this group, no templates detected yet
filter_keep.update(prov_filter)
for filt in filters_kill:
filt.update(prov_filter)
# Keeping current appliances
# Retrieve list of all templates for given group
# I know joins might be a bit better solution but I'll leave that for later.
possible_templates = list(
Template.objects.filter(
usable=True, ready=True, template_group=gs.template_group,
preconfigured=preconfigured, **filter_keep).all())
# If it can be deployed, it must exist
possible_templates_for_provision = filter(lambda tpl: tpl.exists, possible_templates)
appliances = []
for template in possible_templates:
appliances.extend(
Appliance.objects.filter(
template=template, appliance_pool=None, marked_for_deletion=False))
# If we then want to delete some appliances, better kill the eldest ones first. status_changed
# says which one was provisioned when, because nothing else touches that field afterwards.
appliances.sort(key=lambda appliance: appliance.status_changed)
pool_size = gs.template_pool_size if preconfigured else gs.unconfigured_template_pool_size
if len(appliances) < pool_size and possible_templates_for_provision:
# There must be some templates in order to run the provisioning
# Provision ONE appliance at a time for each group; that way it is possible to maintain
# reasonable balancing
new_appliance_name = settings.APPLIANCE_FORMAT.format(
group=template.template_group.id,
date=template.date.strftime("%y%m%d"),
rnd=fauxfactory.gen_alphanumeric(8))
with transaction.atomic():
# Now look for templates that are on non-busy providers
tpl_free = filter(
lambda t: t.provider.free,
possible_templates_for_provision)
if tpl_free:
appliance = Appliance(
template=sorted(tpl_free, key=lambda t: t.provider.appliance_load)[0],
name=new_appliance_name)
appliance.save()
if tpl_free:
self.logger.info(
"Adding an appliance to shepherd: {}/{}".format(appliance.id, appliance.name))
clone_template_to_appliance.delay(appliance.id, None)
elif len(appliances) > pool_size:
# Too many appliances, kill the surplus
# Only kill those that are visible only for one group. This is necessary so the groups
# don't "fight"
for appliance in appliances[:len(appliances) - pool_size]:
if appliance.is_visible_only_in_group(gs.user_group):
self.logger.info("Killing an extra appliance {}/{} in shepherd".format(
appliance.id, appliance.name))
Appliance.kill(appliance)
# Killing old appliances
for filter_kill in filters_kill:
for template in Template.objects.filter(
ready=True, usable=True, template_group=gs.template_group,
preconfigured=preconfigured, container=None, **filter_kill):
for a in Appliance.objects.filter(
template=template, appliance_pool=None, marked_for_deletion=False):
self.logger.info(
"Killing appliance {}/{} in shepherd because it is obsolete now".format(
a.id, a.name))
Appliance.kill(a)
@singleton_task()
def free_appliance_shepherd(self):
generic_shepherd(self, True)
generic_shepherd(self, False)
@singleton_task()
def wait_appliance_ready(self, appliance_id):
"""This task checks for appliance's readiness for use. The checking loop is designed as retrying
the task to free up the queue."""
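# Note: self.retry() raises celery.exceptions.Retry, so whenever it is called below the rest
# of the function body is skipped and the task is re-queued; this is what turns the task into
# a polling loop.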
try:
appliance = Appliance.objects.get(id=appliance_id)
if appliance.appliance_pool is not None:
if appliance.appliance_pool.not_needed_anymore:
# Terminate task chain
self.request.callbacks[:] = []
kill_appliance.delay(appliance_id)
return
if appliance.power_state == Appliance.Power.UNKNOWN or appliance.ip_address is None:
self.retry(args=(appliance_id,), countdown=30, max_retries=45)
if Appliance.objects.get(id=appliance_id).cfme.ipapp.is_web_ui_running():
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = True
appliance.save(update_fields=['ready'])
appliance.set_status("The appliance is ready.")
with diaper:
appliance.synchronize_metadata()
else:
with transaction.atomic():
appliance = Appliance.objects.get(id=appliance_id)
appliance.ready = False
appliance.save(update_fields=['ready'])
appliance.set_status("Waiting for UI to appear.")
self.retry(args=(appliance_id,), countdown=30, max_retries=45)
except ObjectDoesNotExist:
# source object is not present, terminating
return
@singleton_task()
def anyvm_power_on(self, provider, vm):
provider = Provider.objects.get(id=provider, working=True, disabled=False)
provider.api.start_vm(vm)
@singleton_task()
def anyvm_power_off(self, provider, vm):
provider = Provider.objects.get(id=provider, working=True, disabled=False)
provider.api.stop_vm(vm)
@singleton_task()
def anyvm_suspend(self, provider, vm):
provider = Provider.objects.get(id=provider, working=True, disabled=False)
provider.api.suspend_vm(vm)
@singleton_task()
def anyvm_delete(self, provider, vm):
provider = Provider.objects.get(id=provider, working=True, disabled=False)
provider.api.delete_vm(vm)
@singleton_task()
def delete_template_from_provider(self, template_id):
template = Template.objects.get(id=template_id)
if not template.provider.is_working:
raise RuntimeError('Provider is not working.')
try:
template.provider_api.delete_template(template.name)
except Exception as e:
self.logger.exception(e)
return False
with transaction.atomic():
template = Template.objects.get(pk=template.pk)
template.exists = False
template.save(update_fields=['exists'])
return True
@singleton_task()
def appliance_rename(self, appliance_id, new_name):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
return None
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
if not appliance.provider.allow_renaming:
return None
if appliance.name == new_name:
return None
with redis.appliances_ignored_when_renaming(appliance.name, new_name):
self.logger.info("Renaming {}/{} to {}".format(appliance_id, appliance.name, new_name))
appliance.name = appliance.provider_api.rename_vm(appliance.name, new_name)
appliance.save(update_fields=['name'])
return appliance.name
@singleton_task()
def rename_appliances_for_pool(self, pool_id):
with transaction.atomic():
try:
appliance_pool = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
return
appliances = [
appliance
for appliance
in appliance_pool.appliances
if appliance.provider_api.can_rename
]
for appliance in appliances:
if not appliance.provider.allow_renaming:
continue
if not appliance.provider.is_working:
continue
new_name = '{}_'.format(appliance_pool.owner.username)
if appliance.version and not appliance.version.startswith('...'):
# CFME
new_name += 'cfme_{}_'.format(appliance.version.replace('.', ''))
else:
# MIQ
new_name += 'miq_'
new_name += '{}_{}'.format(
appliance.template.date.strftime("%y%m%d"),
fauxfactory.gen_alphanumeric(length=4))
appliance_rename.apply_async(
countdown=10, # To prevent clogging with the transaction.atomic
args=(appliance.id, new_name))
@singleton_task(soft_time_limit=60)
def check_update(self):
sprout_sh = project_path.join("sprout").join("sprout.sh")
try:
result = command.run([sprout_sh.strpath, "check-update"])
except command.CommandException as e:
result = e
needs_update = result.output.strip().lower() != "up-to-date"
redis.set("sprout-needs-update", needs_update)
@singleton_task()
def scavenge_managed_providers(self):
chord_tasks = []
for appliance in Appliance.objects.exclude(appliance_pool=None):
chord_tasks.append(scavenge_managed_providers_from_appliance.si(appliance.id))
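# chord() runs all the per-appliance scavenge tasks in parallel and, once they have all
# finished, calls calculate_provider_management_usage with the list of their return values.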
chord(chord_tasks)(calculate_provider_management_usage.s())
@singleton_task(soft_time_limit=180)
def scavenge_managed_providers_from_appliance(self, appliance_id):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
return None
try:
managed_providers = appliance.ipapp.managed_known_providers
appliance.managed_providers = [prov.key for prov in managed_providers]
except Exception as e:
# To prevent single appliance messing up whole result
provider_error_logger().error("{}: {}".format(type(e).__name__, str(e)))
return None
return appliance.id
@singleton_task()
def calculate_provider_management_usage(self, appliance_ids):
results = {}
for appliance_id in filter(lambda id: id is not None, appliance_ids):
try:
appliance = Appliance.objects.get(id=appliance_id)
except ObjectDoesNotExist:
# Deleted in meanwhile
continue
for provider_key in appliance.managed_providers:
if provider_key not in results:
results[provider_key] = []
results[provider_key].append(appliance.id)
for provider in Provider.objects.filter(working=True, disabled=False):
provider.appliances_manage_this_provider = results.get(provider.id, [])
@singleton_task()
def mailer_version_mismatch(self):
"""This is usually called per-mismatch, but as the mismatches are stored in database and the
mail can fail sending, so this can send the mismatches in a batch in this case."""
with transaction.atomic():
mismatches = MismatchVersionMailer.objects.filter(sent=False)
if not mismatches:
return
email_body = """\
Hello,
I am Sprout template version mismatch spammer. I think there are some version mismatches.
Here is the list:
{}
Sincerely,
Sprout template version mismatch spammer™
""".format(
"\n".join(
"* {} @ {} : supposed {} , true {}".format(
mismatch.template_name, mismatch.provider.id, mismatch.supposed_version,
mismatch.actual_version)
for mismatch in mismatches
)
)
user_mails = []
for user in User.objects.filter(is_superuser=True):
if user.email:
user_mails.append(user.email)
result = send_mail(
"Template version mismatches detected",
email_body,
"[email protected]",
user_mails,
)
if result > 0:
for mismatch in mismatches:
mismatch.sent = True
mismatch.save(update_fields=['sent'])
@singleton_task()
def obsolete_template_deleter(self):
for group in Group.objects.all():
if group.template_obsolete_days_delete:
# We can delete based on the template age
obsolete_templates = group.obsolete_templates
if obsolete_templates is not None:
for template in obsolete_templates:
if template.can_be_deleted:
delete_template_from_provider.delay(template.id)
@singleton_task()
def connect_direct_lun(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
if not hasattr(appliance.provider_api, "connect_direct_lun_to_appliance"):
return False
try:
appliance.provider_api.connect_direct_lun_to_appliance(appliance.name, False)
except Exception as e:
appliance.set_status("LUN: {}: {}".format(type(e).__name__, str(e)))
return False
else:
appliance.reload()
with transaction.atomic():
appliance.lun_disk_connected = True
appliance.save()
return True
@singleton_task()
def disconnect_direct_lun(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
if not appliance.provider.is_working:
raise RuntimeError('Provider is not working.')
if not appliance.lun_disk_connected:
return False
if not hasattr(appliance.provider_api, "connect_direct_lun_to_appliance"):
return False
try:
appliance.provider_api.connect_direct_lun_to_appliance(appliance.name, True)
except Exception as e:
appliance.set_status("LUN: {}: {}".format(type(e).__name__, str(e)))
return False
else:
appliance.reload()
with transaction.atomic():
appliance.lun_disk_connected = False
appliance.save()
return True
@singleton_task()
def appliance_yum_update(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
appliance.ipapp.update_rhel(reboot=False)
@singleton_task()
def pick_templates_for_deletion(self):
"""Applies some heuristics to guess templates that might be candidates to deletion."""
to_mail = {}
for group in Group.objects.all():
for zstream, versions in group.pick_versions_to_delete().items():
for version in versions:
for template in Template.objects.filter(
template_group=group, version=version, exists=True, suggested_delete=False):
template.suggested_delete = True
template.save(update_fields=['suggested_delete'])
if group.id not in to_mail:
to_mail[group.id] = {}
if zstream not in to_mail[group.id]:
to_mail[group.id][zstream] = {}
if version not in to_mail[group.id][zstream]:
to_mail[group.id][zstream][version] = []
to_mail[group.id][zstream][version].append(
"{} @ {}".format(template.name, template.provider.id))
# TODO: Figure out why it was spamming
if to_mail and False:
data = yaml.safe_dump(to_mail, default_flow_style=False)
email_body = """\
Hello,
just letting you know that there are some templates that you might like to delete:
{}
Visit Sprout's Templates page for more information.
Sincerely,
Sprout.
""".format(data)
user_mails = []
for user in User.objects.filter(is_superuser=True):
if user.email:
user_mails.append(user.email)
send_mail(
"Possible candidates for template deletion",
email_body,
"[email protected]",
user_mails,
)
@singleton_task()
def check_swap_in_appliances(self):
chord_tasks = []
for appliance in Appliance.objects.filter(
ready=True, power_state=Appliance.Power.ON,
marked_for_deletion=False).exclude(power_state=Appliance.Power.ORPHANED):
if appliance.is_openshift:
continue
chord_tasks.append(check_swap_in_appliance.si(appliance.id))
chord(chord_tasks)(notify_owners.s())
@singleton_task()
def check_swap_in_appliance(self, appliance_id):
appliance = Appliance.objects.get(id=appliance_id)
try:
swap_amount = appliance.ipapp.swap
except (SSHException, socket.error, Exception) as e:
if type(e) is Exception and 'SSH is unavailable' not in str(e):
# Because otherwise it might not be an SSH error
raise
ssh_failed = True
swap_amount = None
else:
ssh_failed = False
went_up = (
(appliance.swap is not None and swap_amount > appliance.swap) or
(appliance.swap is None and swap_amount is not None and swap_amount > 0))
ssh_failed_changed = ssh_failed and not appliance.ssh_failed
appliance.swap = swap_amount
appliance.ssh_failed = ssh_failed
appliance.save()
# Returns a tuple - (appliance_id, went_up?, current_amount, ssh_failed?)
return appliance.id, went_up, swap_amount, ssh_failed_changed
@singleton_task()
def notify_owners(self, results):
# Filter out any errors
results = [x for x in results if isinstance(x, (list, tuple)) and len(x) == 4]
per_user = {}
for appliance_id, went_up, current_swap, ssh_failed_changed in results:
if not went_up and not ssh_failed_changed:
# Not interested
continue
appliance = Appliance.objects.get(id=appliance_id)
if appliance.appliance_pool is not None:
username = appliance.appliance_pool.owner.username
user = appliance.appliance_pool.owner
else:
username = 'SHEPHERD'
user = None
issues = []
if went_up:
issues.append('swap++ {}M'.format(current_swap))
if ssh_failed_changed:
issues.append('ssh unreachable')
message = '{}/{} {}'.format(
appliance.name, appliance.ip_address, ', '.join(issues))
if user is None:
# No email
continue
if not user.email:
# Same here
continue
# We assume that "living" users have an e-mail set; therefore we will not nag about bots'
# appliances.
send_message('{}: {}'.format(username, message))
# Add the message to be sent
if user not in per_user:
per_user[user] = []
per_user[user].append(message)
# Send out the e-mails
for user, messages in per_user.items():
appliance_list = '\n'.join('* {}'.format(message) for message in messages)
email_body = """\
Hello,
I discovered that some of your appliances are behaving badly. Please check them out:
{}
Best regards,
The Sprout™
""".format(appliance_list)
send_mail(
"[Sprout] Appliance swap report",
email_body,
"[email protected]",
[user.email],
)
@singleton_task()
def appliances_synchronize_metadata(self):
for appliance in Appliance.objects.all():
try:
appliance.synchronize_metadata()
except ObjectDoesNotExist:
return
@singleton_task()
def synchronize_untracked_vms(self):
for provider in Provider.objects.filter(working=True, disabled=False):
synchronize_untracked_vms_in_provider.delay(provider.id)
def parsedate(d):
if d is None:
return d
else:
return iso8601.parse_date(d)
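# Minimal usage sketch for parsedate (illustrative): parsedate(None) returns None, while
# parsedate('2017-04-02T12:00:00Z') returns a timezone-aware datetime via iso8601.parse_date.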
@singleton_task()
def synchronize_untracked_vms_in_provider(self, provider_id):
"""'re'-synchronizes any vms that might be lost during outages."""
provider = Provider.objects.get(id=provider_id, working=True, disabled=False)
provider_api = provider.api
if not hasattr(provider_api, 'list_vm'):
# This provider does not have VMs (e.g. Hawkular or Openshift)
return
for vm_name in sorted(map(str, provider_api.list_vm())):
if Appliance.objects.filter(name=vm_name, template__provider=provider).count() != 0:
continue
# We have an untracked VM. Let's investigate
try:
appliance_id = provider_api.get_meta_value(vm_name, 'sprout_id')
except KeyError:
continue
except NotImplementedError:
# Do not bother if not implemented in the API
return
# just check it again ...
if Appliance.objects.filter(id=appliance_id).count() == 1:
# For some reason it is already in
continue
# Now it appears that this is a VM that was in Sprout
construct = {'id': appliance_id}
# Retrieve appliance data
try:
self.logger.info('Trying to reconstruct appliance %d/%s', appliance_id, vm_name)
construct['name'] = vm_name
template_id = provider_api.get_meta_value(vm_name, 'sprout_source_template_id')
# Templates are not deleted from the DB so this should be OK.
construct['template'] = Template.objects.get(id=template_id)
construct['name'] = vm_name
construct['ready'] = provider_api.get_meta_value(vm_name, 'sprout_ready')
construct['description'] = provider_api.get_meta_value(vm_name, 'sprout_description')
construct['lun_disk_connected'] = provider_api.get_meta_value(
vm_name, 'sprout_lun_disk_connected')
construct['swap'] = provider_api.get_meta_value(vm_name, 'sprout_swap')
construct['ssh_failed'] = provider_api.get_meta_value(vm_name, 'sprout_ssh_failed')
# Time fields
construct['datetime_leased'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_datetime_leased'))
construct['leased_until'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_leased_until'))
construct['status_changed'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_status_changed'))
construct['created_on'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_created_on'))
construct['modified_on'] = parsedate(
provider_api.get_meta_value(vm_name, 'sprout_modified_on'))
except KeyError as e:
self.logger.error('Failed to reconstruct %d/%s', appliance_id, vm_name)
self.logger.exception(e)
continue
# Retrieve pool data if applicable
try:
pool_id = provider_api.get_meta_value(vm_name, 'sprout_pool_id')
pool_construct = {'id': pool_id}
pool_construct['total_count'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_total_count')
group_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_group')
pool_construct['group'] = Group.objects.get(id=group_id)
try:
construct_provider_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_provider')
pool_construct['provider'] = Provider.objects.get(id=construct_provider_id)
except (KeyError, ObjectDoesNotExist):
# optional
pool_construct['provider'] = None
pool_construct['version'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_version')
pool_construct['date'] = parsedate(provider_api.get_meta_value(
vm_name, 'sprout_pool_appliance_date'))
owner_id = provider_api.get_meta_value(
vm_name, 'sprout_pool_owner_id')
try:
owner = User.objects.get(id=owner_id)
except ObjectDoesNotExist:
owner_username = provider_api.get_meta_value(
vm_name, 'sprout_pool_owner_username')
owner = User(id=owner_id, username=owner_username)
owner.save()
pool_construct['owner'] = owner
pool_construct['preconfigured'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_preconfigured')
pool_construct['description'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_description')
pool_construct['not_needed_anymore'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_not_needed_anymore')
pool_construct['finished'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_finished')
pool_construct['yum_update'] = provider_api.get_meta_value(
vm_name, 'sprout_pool_yum_update')
try:
construct['appliance_pool'] = AppliancePool.objects.get(id=pool_id)
except ObjectDoesNotExist:
pool = AppliancePool(**pool_construct)
pool.save()
construct['appliance_pool'] = pool
except KeyError as e:
pass
appliance = Appliance(**construct)
appliance.save()
# And now, refresh!
refresh_appliances_provider.delay(provider.id)
@singleton_task()
def read_docker_images_from_url(self):
for group in Group.objects.exclude(Q(templates_url=None) | Q(templates_url='')):
read_docker_images_from_url_group.delay(group.id)
@singleton_task()
def read_docker_images_from_url_group(self, group_id):
group = Group.objects.get(id=group_id)
with closing(urlopen(group.templates_url)) as http:
root = etree.parse(http, parser=etree.HTMLParser()).getroot()
result = set()
for link in root.xpath('//a[../../td/img[contains(@src, "folder")]]'):
try:
href = link.attrib['href']
except KeyError:
continue
url = group.templates_url + href
version_with_suffix = href.rstrip('/') # Does not contain the last digit
try:
with closing(urlopen(url + 'cfme-docker')) as http:
cfme_docker = http.read().strip()
except HTTPError:
self.logger.info('Skipping {} (no docker)'.format(url))
continue
try:
with closing(urlopen(url + 'version')) as http:
cfme_version = http.read().strip()
if '-' in version_with_suffix:
# Use the suffix from the folder name
suffix = version_with_suffix.rsplit('-', 1)[-1]
cfme_version = '{}-{}'.format(cfme_version, suffix)
except HTTPError:
self.logger.info('Skipping {} (no version)'.format(url))
continue
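        # The cfme-docker file is expected to contain either a single
        # "<pull_url> (latest=<tag>)" pair or a "Tags: <tag> <tag> ..." list;
        # the concrete values in these comments are illustrative only.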
cfme_docker = re.split(r'\s+', cfme_docker)
if len(cfme_docker) == 2:
pull_url, latest_mapping = cfme_docker
latest = re.sub(r'^\(latest=([^)]+)\)$', '\\1', latest_mapping)
proper_pull_url = re.sub(r':latest$', ':{}'.format(latest), pull_url)
elif cfme_docker and cfme_docker[0].lower().strip() == 'tags:':
# Multiple tags, take the longest
proper_pull_url = sorted(filter(None, cfme_docker[1:]), key=len, reverse=True)[0]
latest = proper_pull_url.rsplit(':', 1)[-1]
else:
self.logger.info('Skipping: unknown format: {!r}'.format(cfme_docker))
continue
if cfme_version in result:
continue
process_docker_images_from_url_group.delay(group.id, cfme_version, latest, proper_pull_url)
result.add(cfme_version)
@singleton_task()
def process_docker_images_from_url_group(self, group_id, version, docker_version, pull_url):
group = Group.objects.get(id=group_id)
# "-20160624221308"
date = docker_version.rsplit('-', 1)[-1]
try:
date = datetime.strptime(date, '%Y%m%d%H%M%S').date() # noqa
    except (AttributeError, ValueError):  # strptime signals a bad timestamp with ValueError
raise ValueError('Could not parse date from {}'.format(docker_version))
if group.template_obsolete_days is not None:
today = datetime.now().date()
        age = (today - date).days
        if age > group.template_obsolete_days:
self.logger.info('Ignoring old template {} (age {} days)'.format(pull_url, age))
return
for provider in Provider.objects.filter(working=True, disabled=False):
if not provider.container_base_template:
# 11:30 PM, TODO put this check in a query
continue
if provider.remaining_configuring_slots < 1:
# Will do it later ...
continue
if provider.provider_type == 'openshift':
# openshift providers aren't containerized ones
continue
try:
Template.objects.get(
~Q(container=None), template_group=group, provider=provider, version=version,
date=date, preconfigured=True)
except ObjectDoesNotExist:
create_docker_vm.delay(group.id, provider.id, version, date, pull_url)
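# Illustrative example (the 4-character suffix is random):
#   docker_vm_name('5.8.0.17', date(2016, 6, 24)) -> 'docker-58017-20160624-ab12'
# Non-alphanumerics are stripped from the version and only digits are kept from the date.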
def docker_vm_name(version, date):
return 'docker-{}-{}-{}'.format(
re.sub(r'[^0-9a-z]', '', version.lower()),
re.sub(r'[^0-9]', '', str(date)),
fauxfactory.gen_alphanumeric(length=4).lower())
@singleton_task()
def create_docker_vm(self, group_id, provider_id, version, date, pull_url):
group = Group.objects.get(id=group_id)
provider = Provider.objects.get(id=provider_id, working=True, disabled=False)
with transaction.atomic():
if provider.remaining_configuring_slots < 1:
self.retry(
args=(group_id, provider_id, version, date, pull_url), countdown=60, max_retries=60)
new_name = docker_vm_name(version, date)
new_template = Template(
template_group=group, provider=provider,
container='cfme', name=new_name, original_name=provider.container_base_template,
version=version, date=date,
ready=False, exists=False, usable=True, preconfigured=True,
template_type=Template.DOCKER_VM)
new_template.save()
workflow = chain(
prepare_template_deploy.si(new_template.id),
configure_docker_template.si(new_template.id, pull_url),
prepare_template_seal.si(new_template.id),
prepare_template_poweroff.si(new_template.id),
prepare_template_finish.si(new_template.id),
)
workflow.link_error(prepare_template_delete_on_error.si(new_template.id))
workflow()
@singleton_task()
def configure_docker_template(self, template_id, pull_url):
template = Template.objects.get(id=template_id)
template.set_status("Waiting for SSH.")
appliance = CFMEAppliance.from_provider(
template.provider_name, template.name, container=template.container)
appliance.ipapp.wait_for_ssh()
with appliance.ipapp.ssh_client as ssh:
template.set_status("Setting the pull URL.")
ssh.run_command(
'echo "export CFME_URL={}" > /etc/cfme_pull_url'.format(pull_url), ensure_host=True)
template.set_status("Pulling the {}.".format(pull_url))
ssh.run_command('docker pull {}'.format(pull_url), ensure_host=True)
template.set_status('Pulling finished.')
@singleton_task()
def sync_appliance_hw(self, appliance_id):
Appliance.objects.get(id=appliance_id).sync_hw()
@singleton_task()
def sync_provider_hw(self, provider_id):
Provider.objects.get(id=provider_id, working=True, disabled=False).perf_sync()
@singleton_task()
def sync_quotas_perf(self):
for provider in Provider.objects.all():
sync_provider_hw.delay(provider.id)
for appliance in provider.currently_managed_appliances:
sync_appliance_hw.delay(appliance.id)
@singleton_task()
def nuke_template_configuration(self, template_id):
try:
template = Template.objects.get(id=template_id)
except ObjectDoesNotExist:
# No longer exists
return True
if template.provider.api.does_vm_exist(template.name):
self.logger.info('Found the template as a VM')
template.provider.api.delete_vm(template.name)
if template.provider.api.does_template_exist(template.name):
self.logger.info('Found the template as a template')
template.provider.api.delete_template(template.name)
template.delete()
return True
| psav/cfme_tests | sprout/appliances/tasks.py | Python | gpl-2.0 | 91,308 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Contains functions for running analysis tasks
"""
# ~~~~~ LOGGING ~~~~~~ #
import logging
logger = logging.getLogger(__name__)
# ~~~~~ LOAD MORE PACKAGES ~~~~~ #
import os
import json
from sns_classes.classes import SnsWESAnalysisOutput
import mail
import sns_tasks
import job_management
import setup_report
import validation
import _exceptions as _e
from util import qsub
# ~~~~~ LOAD CONFIGS ~~~~~ #
import config
configs = config.config
# ~~~~~ GLOBALS ~~~~~ #
extra_handlers = []
sns_started = False
"""
Used to determine if sns analysis has started
"""
# ~~~~~ FUNCTIONS ~~~~~ #
def get_task_class(task_name):
"""
Gets the task's class from the `sns_tasks` module
Parameters
----------
task_name: str
the name of an analysis task, assumed to correspond to a Class loaded in the `sns_tasks` module
Returns
-------
Class
the Class object matching the `task_name`
"""
# make sure the task is present in sns_tasks
    if task_name not in dir(sns_tasks):
logger.error('Task {0} was not found in the sns_tasks module'.format(task_name))
raise _e.SnsTaskMissing(message = 'Task {0} was not found in the sns_tasks module'.format(task_name), errors = '')
else:
logger.debug('Loading task {0} '.format(task_name))
# load the task class from the module
task_class = getattr(sns_tasks, task_name)
return(task_class)
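# Illustrative usage of get_task_class (the task name is hypothetical):
# get_task_class('SomeTask') returns the sns_tasks.SomeTask class object, which the
# caller can then instantiate and run.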
def run_tasks(tasks, analysis_dir = None, analysis = None, debug_mode = False, **kwargs):
"""
Runs a series of analysis tasks
Parameters
----------
tasks: dict
a dictionary (e.g. loaded from a YAML formatted `task_list`) containing the names of analysis tasks to be run
analysis_dir: str
the path to a directory to use for the analysis. For `sns` tasks, this corresponds to the data output location. For downstream `tasks`, this will be used to create a `SnsWESAnalysisOutput` object
analysis: SnsWESAnalysisOutput
object representing output from an `sns wes` analysis pipeline output on which to run downstream analysis tasks
debug_mode: bool
prevent the program from halting if errors are found in qsub log output files; defaults to `False`. `True` = do not stop for qsub log errors, `False` = stop if errors are found
kwargs: dict
a dictionary containing extra args to pass to the `task_class` upon initialization
Returns
-------
tasks_output: dict
a dictionary containing items output by the analysis task(s) which were run
Todo
----
Figure out what should be contained in `tasks_output`
"""
# items output by tasks which should be returned by this function
tasks_output = {}
# list of files to send in output email
tasks_output['email_files'] = []
if analysis_dir and analysis:
raise _e.ArgumentError(message = 'Both analysis_dir and analysis were passed; there can be only one.', errors = '')
if not analysis_dir and not analysis:
raise _e.ArgumentError(message = 'Neither analysis_dir nor analysis were passed; there must be one.', errors = '')
for task_name, task_params in tasks.items():
task_class = get_task_class(task_name)
# create the task object
if analysis_dir:
task = task_class(analysis_dir = analysis_dir, extra_handlers = extra_handlers, **kwargs)
if analysis:
            # make sure the analysis output object is valid before continuing
if not debug_mode:
if not analysis.is_valid:
err_message = 'The analysis did not pass validations\n'
validations_message = json.dumps(analysis.validations, indent = 4)
logger.error(err_message)
raise _e.AnalysisInvalid(message = err_message + validations_message, errors = '')
task = task_class(analysis = analysis, extra_handlers = extra_handlers)
# run the task
if task_params:
# with the params
task_output = task.run(**task_params)
else:
# without the params
task_output = task.run()
# check for files from the task which should be included in email output
expected_email_files = task.get_expected_email_files()
logger.debug('task email files: {0}'.format(expected_email_files))
if expected_email_files:
for item in expected_email_files:
mail.email_files.append(item)
# check the output of the task
if task_output:
# check for background qsub jobs output by the task
task_jobs = []
for item in task_output:
if isinstance(item, qsub.Job):
task_jobs.append(item)
# if no task_jobs were produced, validate the task output immediately
if not task_jobs:
logger.debug('Validating task output files')
task.validate_output()
else:
# add task jobs to background jobs
logger.debug('Background qsub jobs were generated by the task and will be monitored at program completion')
for job in task_jobs:
job_management.background_jobs.append(job)
# add task expected output to background output to be validated later
logger.debug('Expected output files for the task will be validated at program completion')
task_output_files = task.get_expected_output_files()
for item in task_output_files:
validation.background_output_files.append(item)
# monitor and validate all background jobs
job_management.monitor_validate_background_jobs()
# validate all background output files
validation.validate_background_output_files()
return(tasks_output)
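# Illustrative call of run_tasks (paths and task names are hypothetical):
#   run_tasks({'SomeTask': {'some_param': 1}}, analysis_dir='/path/to/output')
# Exactly one of analysis_dir or analysis must be supplied, as enforced above.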
def run_sns_tasks(task_list, analysis_dir, **kwargs):
"""
Runs tasks which run analysis commands in the context of creating and running new `sns` pipeline analyses.
Parameters
----------
task_list: dict
dictionary of tasks read in from the tasks list file; must have a ``sns`` key with sub-dicts corresponding to ``sns`` tasks to run
analysis_dir: str
path to a directory to hold the analysis output
kwargs: dict
dictionary containing extra args to pass to `run_tasks`
Notes
-----
Each sns task will be run individually, so that all qsub jobs will be monitored to completion at every step.
"""
global sns_started
# get the args that were passed
fastq_dirs = kwargs.pop('fastq_dirs')
targets_bed = kwargs.pop('targets_bed')
probes_bed = kwargs.pop('probes_bed')
pairs_sheet = kwargs.pop('pairs_sheet')
logger.info('Creating new sns analysis in dir {0}'.format(os.path.abspath(analysis_dir)))
# run tasks one at a time
tasks = task_list['sns']
for key, value in tasks.items():
# checking if this is the first task that starts the sns analysis and sends an email
if not sns_started:
sns_started = True
mail.sns_start_email(analysis_dir = analysis_dir)
run_tasks(tasks = {key: value}, analysis_dir = analysis_dir, fastq_dirs = fastq_dirs, targets_bed = targets_bed, probes_bed = probes_bed, pairs_sheet = pairs_sheet, **kwargs)
def run_snsxt_tasks(task_list, analysis_dir, **kwargs):
"""
Runs the downstream `snsxt` analysis tasks on `sns` pipeline output
Parameters
----------
task_list: dict
dictionary of tasks read in from the tasks list file; must have a ``tasks`` key with sub-dicts corresponding to tasks to run
analysis_dir: str
path to a directory containing `sns` analysis output
kwargs: dict
dictionary containing extra args to pass to `run_tasks`
"""
# get the args that were passed
analysis_id = kwargs.pop('analysis_id')
results_id = kwargs.pop('results_id')
debug_mode = kwargs.pop('debug_mode')
tasks = task_list['tasks']
logger.info('Loading analysis {0} : {1} from dir {2}'.format(analysis_id, results_id, os.path.abspath(analysis_dir)))
analysis = SnsWESAnalysisOutput(dir = analysis_dir, id = analysis_id, results_id = results_id, sns_config = configs, extra_handlers = extra_handlers)
run_tasks(tasks, analysis = analysis, debug_mode = debug_mode, **kwargs)
if task_list.get('setup_report', None):
# TODO: move report out of this function and into main as part of cleanup
logger.debug('Starting report setup')
setup_report.setup_report(output_dir = analysis_dir, analysis_id = analysis_id, results_id = results_id)
| NYU-Molecular-Pathology/snsxt | snsxt/run_tasks.py | Python | gpl-3.0 | 8,751 |
if __name__ == '__main__':
asking = True
print("Juan Questions")
print("Presione 1 para salir")
while asking == True:
response = input("Pregunta algo: ")
if response == "1":
print("Salir")
asking = False
break
if response.endswith("?"):
print("Ofi")
        elif ('A' <= response <= 'Z') or ('a' <= response <= 'z'):
print("Chillea")
elif response == "":
print("mmm")
else:
print("Me da igual")
| arauzoliver/uip-iiig2016-prog3 | JuanQuestion/Juan.py | Python | mit | 580 |
# This file is part of BurnMan - a thermoelastic and thermodynamic toolkit for the Earth and Planetary Sciences
# Copyright (C) 2012 - 2015 by the BurnMan team, released under the GNU
# GPL v2 or later.
from __future__ import absolute_import
import numpy as np
import scipy.integrate as integrate
from . import geotherm
from . import seismic
from . import averaging_schemes
def velocities_from_rock(rock, pressures, temperatures, averaging_scheme=averaging_schemes.VoigtReussHill()):
"""
This function is deprecated. Use :func:`burnman.material.Material.evaluate` instead.
A function that rolls several steps into one: given a rock and a list of
pressures and temperatures, it calculates the elastic moduli of the
individual phases using calculate_moduli(), averages them using
average_moduli(), and calculates the seismic velocities using
compute_velocities().
Parameters
----------
rock : :class:`burnman.Material`
this is the rock for which you are calculating velocities
pressures: list of float
list of pressures you want to evaluate the rock at. :math:`[Pa]`
temperatures: list of float
list of temperatures you want to evaluate the rock at. :math:`[K]`
averaging_scheme: :class:`burnman.averaging_schemes.AveragingScheme`
Averaging scheme to use.
Returns
-------
rho, V_p, V_s, V_phi, K, G : lists of floats
Lists of density [kg/m^3], P-wave velocity [m/s], shear-wave velocity [m/s], bulk sound velocity [m/s], bulk modulus [Pa], and shear modulus [Pa] for each P,T point.
"""
old_averaging_scheme = rock.averaging_scheme
rock.set_averaging_scheme(averaging_scheme)
rho, vp, vs, vphi, K, G = rock.evaluate(
['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
rock.set_averaging_scheme(old_averaging_scheme)
return rho, vp, vs, vphi, K, G
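# Sketch of the recommended replacement for the deprecated wrapper above (illustrative):
#     rho, vp, vs, vphi, K, G = rock.evaluate(
#         ['rho', 'v_p', 'v_s', 'v_phi', 'K_S', 'G'], pressures, temperatures)
# which is what the wrapper does internally, apart from temporarily switching the
# averaging scheme.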
def compare_l2(depth, calc, obs):
"""
PUT IN TOOLS
Computes the L2 norm for N profiles at a time (assumed to be linear between points).
.. math:: math does not work yet...
\sum_{i=1}^{\\infty} x_{i}
    :type depth: array of float
    :param depth: depths. :math:`[m]`
:type calc: list of arrays of float
:param calc: N arrays calculated values, e.g. [mat_vs,mat_vphi]
:type obs: list of arrays of float
:param obs: N arrays of values (observed or calculated) to compare to , e.g. [seis_vs, seis_vphi]
:returns: array of L2 norms of length N
:rtype: array of floats
"""
err = []
for l in range(len(calc)):
err.append(l2(depth, calc[l], obs[l]))
return err
def compare_chifactor(calc, obs):
"""
PUT IN TOOLS
Computes the chi factor for N profiles at a time. Assumes a 1% a priori uncertainty on the seismic model.
:type calc: list of arrays of float
:param calc: N arrays calculated values, e.g. [mat_vs,mat_vphi]
:type obs: list of arrays of float
:param obs: N arrays of values (observed or calculated) to compare to , e.g. [seis_vs, seis_vphi]
:returns: error array of length N
:rtype: array of floats
"""
err = []
for l in range(len(calc)):
err.append(chi_factor(calc[l], obs[l]))
return err
def l2(x, funca, funcb):
"""
PUT IN TOOLS
Computes the L2 norm for one profile(assumed to be linear between points).
:type x: array of float
:param x: depths :math:`[m]`.
:type funca: list of arrays of float
:param funca: array calculated values
:type funcb: list of arrays of float
:param funcb: array of values (observed or calculated) to compare to
:returns: L2 norm
:rtype: array of floats
"""
diff = np.array(funca - funcb)
diff = diff * diff
return integrate.trapz(diff, x)
def nrmse(x, funca, funcb):
"""
PUT IN TOOLS
Normalized root mean square error for one profile
:type x: array of float
:param x: depths in m.
:type funca: list of arrays of float
:param funca: array calculated values
:type funcb: list of arrays of float
:param funcb: array of values (observed or calculated) to compare to
:returns: RMS error
:rtype: array of floats
"""
diff = np.array(funca - funcb)
diff = diff * diff
rmse = np.sqrt(np.sum(diff) / x)
nrmse = rmse / (np.max(funca) - np.min(funca))
return nrmse
def chi_factor(calc, obs):
"""
PUT IN TOOLS
:math:`\\chi` factor for one profile assuming 1% uncertainty on the reference model (obs)
:type calc: list of arrays of float
:param calc: array calculated values
:type obs: list of arrays of float
:param obs: array of reference values to compare to
:returns: :math:`\\chi` factor
:rtype: array of floats
"""
err = np.empty_like(calc)
for i in range(len(calc)):
err[i] = pow((calc[i] - obs[i]) / (0.01 * np.mean(obs)), 2.)
err_tot = np.sum(err) / len(err)
return err_tot
| ian-r-rose/burnman | burnman/main.py | Python | gpl-2.0 | 4,969 |
from __future__ import absolute_import
#
# This is an extension to the Nautilus file manager to allow better
# integration with the Subversion source control system.
#
# Copyright (C) 2006-2008 by Jason Field <[email protected]>
# Copyright (C) 2007-2008 by Bruce van der Kooij <[email protected]>
# Copyright (C) 2008-2010 by Adam Plumb <[email protected]>
#
# RabbitVCS is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# RabbitVCS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with RabbitVCS; If not, see <http://www.gnu.org/licenses/>.
#
from rabbitvcs.util import helper
import gi
gi.require_version("Gtk", "3.0")
sa = helper.SanitizeArgv()
from gi.repository import Gtk, GObject, Gdk
sa.restore()
from rabbitvcs.ui import InterfaceView
from rabbitvcs.ui.action import SVNAction
import rabbitvcs.ui.widget
import rabbitvcs.ui.dialog
import rabbitvcs.vcs
from rabbitvcs.util.strings import S
from rabbitvcs import gettext
_ = gettext.gettext
class SVNImport(InterfaceView):
def __init__(self, path):
InterfaceView.__init__(self, "import", "Import")
self.get_widget("Import").set_title(_("Import - %s") % path)
self.path = path
self.vcs = rabbitvcs.vcs.VCS()
self.svn = self.vcs.svn()
if self.svn.is_in_a_or_a_working_copy(path):
self.get_widget("repository").set_text(S(self.svn.get_repo_url(path)).display())
self.repositories = rabbitvcs.ui.widget.ComboBox(
self.get_widget("repositories"),
helper.get_repository_paths()
)
self.message = rabbitvcs.ui.widget.TextView(
self.get_widget("message")
)
def on_ok_clicked(self, widget):
url = self.get_widget("repository").get_text()
if not url:
rabbitvcs.ui.dialog.MessageBox(_("The repository URL field is required."))
return
ignore = not self.get_widget("include_ignored").get_active()
self.hide()
self.action = rabbitvcs.ui.action.SVNAction(
self.svn,
register_gtk_quit=self.gtk_quit_is_set()
)
self.action.append(self.action.set_header, _("Import"))
self.action.append(self.action.set_status, _("Running Import Command..."))
self.action.append(
self.svn.import_,
self.path,
url,
self.message.get_text(),
ignore=ignore
)
self.action.append(self.action.set_status, _("Completed Import"))
self.action.append(self.action.finish)
self.action.schedule()
def on_previous_messages_clicked(self, widget, data=None):
dialog = rabbitvcs.ui.dialog.PreviousMessages()
message = dialog.run()
if message is not None:
self.message.set_text(S(message).display())
classes_map = {
rabbitvcs.vcs.VCS_SVN: SVNImport
}
def import_factory(path):
vcs = rabbitvcs.vcs.VCS_SVN
return classes_map[vcs](path)
if __name__ == "__main__":
from rabbitvcs.ui import main
(options, paths) = main(usage="Usage: rabbitvcs import [path]")
window = import_factory(paths[0])
window.register_gtk_quit()
Gtk.main()
| rabbitvcs/rabbitvcs | rabbitvcs/ui/import.py | Python | gpl-2.0 | 3,606 |
import urllib.request
import urllib.error
import urllib.parse
from urllib.request import HTTPHandler, HTTPSHandler
from wsgi_intercept import InterceptedHTTPConnection
class InterceptedHTTPHandler(HTTPHandler):
"""
Override the default HTTPHandler class with one that uses the
    InterceptedHTTPConnection class to open HTTP URLs.
"""
def http_open(self, req):
return self.do_open(InterceptedHTTPConnection, req)
try:
import ssl
from wsgi_intercept import InterceptedHTTPSConnection
has_ssl_support = True
except ImportError:
has_ssl_support = False
else:
class InterceptedHTTPSHandler(HTTPSHandler):
"""
Override the default HTTPSHandler class with one that uses the
        InterceptedHTTPSConnection class to open HTTPS URLs.
"""
def https_open(self, req):
return self.do_open(InterceptedHTTPSConnection, req)
def install():
handlers = [InterceptedHTTPHandler()]
if has_ssl_support:
handlers.append(InterceptedHTTPSHandler())
# Build and install an opener that users our intercepted handler.
opener = urllib.request.build_opener(*handlers)
urllib.request.install_opener(opener)
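# Typical (illustrative) flow: register an intercept for a host/port with the
# wsgi_intercept package, call install() so urllib.request routes matching requests
# through the Intercepted* connection classes to an in-process WSGI app, and call
# uninstall() afterwards to restore the default opener behaviour.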
def uninstall():
urllib.request.install_opener(None)
| pumazi/wsgi_intercept2 | wsgi_intercept/urllib2_intercept/wsgi_urllib2.py | Python | mit | 1,255 |
# -*- coding: utf-8 -*-
from plone.indexer import indexer
from zope.interface import Interface
import plone.api
@indexer(Interface)
def customer_role(obj):
"""Index users and groups with ``Customer`` role directly on the context.
Don't index inherited `Customer` role. Groups are prefixed with ``group:``
"""
    users = obj.users_with_local_role('Customer')  # get non-acquired roles
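    # Old-style conditional expression: ids that resolve to a group are indexed
    # with a 'group:' prefix, plain user ids are indexed as-is.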
ret = [plone.api.group.get(it) and 'group:%s' % it or it for it in users]
return ret
| TheVirtualLtd/bda.plone.orders | src/bda/plone/orders/indexer.py | Python | bsd-3-clause | 493 |
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from numpy import sqrt
import operator
BASIC_CUBE = [(0, 0, 0), # 0
(0.5, 0, 0.5), # 1
(0.25, 0.25, 0.25), # 2
(0.75, 0.25, 0.75), # 3
(0, 0.5, 0.5), # 4
(0.5, 0.5, 0), # 5
(0.25, 0.75, 0.75), # 6
(0.75, 0.75, 0.25)] # 7
def get_neighbor_offset_table():
ret = [[((0, 0, 0), 2), ((-1, 0, -1), 3), ((-1, -1, 0), 6), ((0, -1, -1), 7)],
[((0, 0, 0), 2), ((0, 0, 0), 3), ((0, -1, 0), 6), ((0, -1, 0), 7)],
[((0, 0, 0), 0), ((0, 0, 0), 1), ((0, 0, 0), 4), ((0, 0, 0), 5)],
[((1, 0, 1), 0), ((0, 0, 0), 1), ((0, 0, 1), 4), ((1, 0, 0), 5)],
[((0, 0, 0), 2), ((0, 0, -1), 3), ((0, 0, 0), 6), ((0, 0, -1), 7)],
[((0, 0, 0), 2), ((-1, 0, 0), 3), ((-1, 0, 0), 6), ((0, 0, 0), 7)],
[((1, 1, 0), 0), ((0, 1, 0), 1), ((0, 0, 0), 4), ((1, 0, 0), 5)],
[((0, 1, 1), 0), ((0, 1, 0), 1), ((0, 0, 1), 4), ((0, 0, 0), 5)]]
return map(lambda j: map(lambda i: Locator(*i), j), ret)
def mul((x, y, z), d):
return (x * d, y * d, z * d)
def add(a, b):
return map(lambda (x, y): x + y, zip(a, b))
def node_cube(spacing):
return map(lambda i: mul(i, spacing), BASIC_CUBE)
def get_mesh((x, y, z), spacing):
c = []
for i in range(x):
xo = i * spacing
for j in range(y):
yo = j * spacing
for k in range(z):
zo = k * spacing
nodes = node_cube(spacing)
nodes = map(lambda i: add(i, (xo, yo, zo)), nodes)
c += nodes
return c
class Locator:
def __init__(self, pos, mod_ind):
self.pos = pos
self.mod_ind = mod_ind
class WaveguideMesh:
def __init__(self, dim, spacing):
self.mesh = get_mesh(dim, spacing)
self.dim = dim
self.offsets = get_neighbor_offset_table()
def get_index(self, locator):
i, j, k = self.dim
x, y, z = locator.pos
l = len(BASIC_CUBE)
return locator.mod_ind + x * l + y * i * l + z * i * j * l
def get_locator(self, index):
i, j, k = self.dim
mod_ind = index % len(BASIC_CUBE)
index -= mod_ind
index /= len(BASIC_CUBE)
x = index % i
index -= x
index /= i
y = index % j
index -= y
index /= j
z = index % k
index -= z
index /= k
return Locator((x, y, z), mod_ind)
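    # Illustrative round trip for dim == (2, 2, 2): get_locator(13) returns
    # Locator((1, 0, 0), 5) and get_index() maps that locator back to 13; the
    # node index within the 8-node cube varies fastest, then x, then y, then z.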
def locator_filter(self, c, relative):
x, y, z = self.dim
rlx, rly, rlz = add(c.pos, relative.pos)
return 0 <= rlx < x and 0 <= rly < y and 0 <= rlz < z
def get_absolute_neighbors(self, index):
locator = self.get_locator(index)
x, y, z = locator.pos
mod_ind = locator.mod_ind
relative = self.offsets[mod_ind]
ret = []
for i in relative:
summed = add(locator.pos, i.pos)
sx, sy, sz = summed
is_neighbor = (0 <= summed[0] < self.dim[0] and
0 <= summed[1] < self.dim[1] and
0 <= summed[2] < self.dim[2])
            ind = self.get_index(Locator(summed, i.mod_ind)) if is_neighbor else -1
ret.append(ind)
return ret
def concat(l):
return reduce(operator.add, l)
def main():
waveguide = WaveguideMesh((2, 2, 2), 1)
x, y, z = map(lambda i: np.array(i), zip(*waveguide.mesh))
max_range = np.array([x.max() - x.min(), y.max() - y.min(), z.max() - z.min()]).max() / 2.0
mean_x = x.mean()
mean_y = y.mean()
mean_z = z.mean()
fig = plt.figure()
for plot in range(8):
ax = fig.add_subplot(331 + plot, projection='3d', aspect='equal')
pos = waveguide.get_index(Locator((0, 0, 0), plot))
n = waveguide.get_absolute_neighbors(pos)
n = filter(lambda i: i >= 0, n)
p = []
p += [waveguide.mesh[i] for i in n]
p += [waveguide.mesh[pos]]
print plot, p
ax.scatter(*zip(*p))
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax = fig.add_subplot(339, projection='3d', aspect='equal')
ax.scatter(*zip(*waveguide.mesh))
ax.set_xlim(mean_x - max_range, mean_x + max_range)
ax.set_ylim(mean_y - max_range, mean_y + max_range)
ax.set_zlim(mean_z - max_range, mean_z + max_range)
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
plt.show()
if __name__ == "__main__":
main()
| reuk/wayverb | scripts/python/iterative_tetrahedral.py | Python | gpl-2.0 | 4,819 |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
from paddle.fluid.framework import convert_np_dtype_to_dtype_, Program, program_guard
import paddle.fluid.core as core
import numpy as np
import copy
import unittest
import sys
sys.path.append("../")
from op_test import OpTest
class TestSequenceLastStepOpError(unittest.TestCase):
def test_errors(self):
with program_guard(Program(), Program()):
def test_Variable():
# the input must be Variable
input_data = np.random.randint(1, 5, [4]).astype("int64")
fluid.layers.sequence_last_step(input_data)
self.assertRaises(TypeError, test_Variable)
def test_input_dtype():
                # int64 input is not supported here, so a TypeError is expected
type_data = fluid.layers.data(
name='type_data',
shape=[7, 1],
append_batch_size=False,
dtype='int64',
lod_level=1)
fluid.layers.sequence_last_step(type_data)
self.assertRaises(TypeError, test_input_dtype)
if __name__ == '__main__':
unittest.main()
| luotao1/Paddle | python/paddle/fluid/tests/unittests/sequence/test_sequence_last_step.py | Python | apache-2.0 | 1,807 |
# implemented with list comprehension with side-effects and a global variable
# there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead
from collections import Counter
c = Counter()
# for use in list comprehensions with side effects! Naughty...
def count_and_return(x):
c[x] += 1
return x
def delete_nth(arr,max_e):
if max_e <= 0:
return []
global c
c = Counter()
return [count_and_return(x) for x in arr if c[x] < max_e] # note: condition is evaluated before the function is applied to x, hence < instead of <=
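# Illustrative example: delete_nth([1, 1, 1, 2], 2) keeps at most two occurrences
# of each element, preserving their order, and returns [1, 1, 2].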
| SelvorWhim/competitive | Codewars/DeleteOccurrencesOfElementOverNTimes.py | Python | unlicense | 646 |
#!/usr/bin/env python
class BytesIO:
def __init__(self, buffer):
self._data = buffer
if not self._data:
self._data = str()
self._pos = 0
def getvalue(self):
return self._data
def close(self):
pass
def readline(self):
return self.read(self._data[self._pos:].find('\n') + 1)
def read(self, n=None):
if n == None:
n = -1
if not isinstance(n, (int, long)):
raise TypeError("Argument must be an integer")
if n < 0:
n = len(self._data)
if len(self._data) <= self._pos:
return ''
newpos = min(len(self._data), self._pos + n)
b = self._data[self._pos : newpos]
self._pos = newpos
return b
def readable(self):
return True
def writable(self):
return True
def seekable(self):
return False
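# Minimal illustrative usage: b = BytesIO('line one\nline two\n'); b.readline()
# returns 'line one\n' and a subsequent b.read() returns the rest of the buffer.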
| silenceli/oga-windows | ovirt-guest-agent/bytesio.py | Python | apache-2.0 | 914 |
"""
stubo.model
~~~~~~~~~~~
The Model?
:copyright: (c) 2015 by OpenCredo.
:license: GPLv3, see LICENSE for more details.
""" | rusenask/stubo-app | stubo/model/__init__.py | Python | gpl-3.0 | 155 |
import inspect
import math
import pprint
from collections.abc import Iterable
from collections.abc import Mapping
from collections.abc import Sized
from decimal import Decimal
from itertools import filterfalse
from numbers import Number
from types import TracebackType
from typing import Any
from typing import Callable
from typing import cast
from typing import Generic
from typing import Optional
from typing import Pattern
from typing import Tuple
from typing import TypeVar
from typing import Union
from more_itertools.more import always_iterable
import _pytest._code
from _pytest.compat import overload
from _pytest.compat import STRING_TYPES
from _pytest.compat import TYPE_CHECKING
from _pytest.outcomes import fail
if TYPE_CHECKING:
from typing import Type
BASE_TYPE = (type, STRING_TYPES)
def _non_numeric_type_error(value, at: Optional[str]) -> TypeError:
at_str = " at {}".format(at) if at else ""
return TypeError(
"cannot make approximate comparisons to non-numeric values: {!r} {}".format(
value, at_str
)
)
# builtin pytest.approx helper
class ApproxBase:
"""
Provide shared utilities for making approximate comparisons between numbers
or sequences of numbers.
"""
    # Tell numpy to use our `__eq__` operator instead of its own.
__array_ufunc__ = None
__array_priority__ = 100
def __init__(self, expected, rel=None, abs=None, nan_ok: bool = False) -> None:
__tracebackhide__ = True
self.expected = expected
self.abs = abs
self.rel = rel
self.nan_ok = nan_ok
self._check_type()
def __repr__(self) -> str:
raise NotImplementedError
def __eq__(self, actual) -> bool:
return all(
a == self._approx_scalar(x) for a, x in self._yield_comparisons(actual)
)
# Ignore type because of https://github.com/python/mypy/issues/4266.
__hash__ = None # type: ignore
def __ne__(self, actual) -> bool:
return not (actual == self)
def _approx_scalar(self, x) -> "ApproxScalar":
return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
def _yield_comparisons(self, actual):
"""
Yield all the pairs of numbers to be compared. This is used to
implement the `__eq__` method.
"""
raise NotImplementedError
def _check_type(self) -> None:
"""
Raise a TypeError if the expected value is not a valid type.
"""
# This is only a concern if the expected value is a sequence. In every
# other case, the approx() function ensures that the expected value has
# a numeric type. For this reason, the default is to do nothing. The
# classes that deal with sequences should reimplement this method to
# raise if there are any non-numeric elements in the sequence.
pass
def _recursive_list_map(f, x):
if isinstance(x, list):
return list(_recursive_list_map(f, xi) for xi in x)
else:
return f(x)
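# For example, _recursive_list_map(str, [1, [2, 3]]) returns ['1', ['2', '3']].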
class ApproxNumpy(ApproxBase):
"""
Perform approximate comparisons where the expected value is numpy array.
"""
def __repr__(self) -> str:
list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
return "approx({!r})".format(list_scalars)
def __eq__(self, actual) -> bool:
import numpy as np
# self.expected is supposed to always be an array here
if not np.isscalar(actual):
try:
actual = np.asarray(actual)
except Exception as e:
raise TypeError(
"cannot compare '{}' to numpy.ndarray".format(actual)
) from e
if not np.isscalar(actual) and actual.shape != self.expected.shape:
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
import numpy as np
# `actual` can either be a numpy array or a scalar, it is treated in
# `__eq__` before being passed to `ApproxBase.__eq__`, which is the
# only method that calls this one.
if np.isscalar(actual):
for i in np.ndindex(self.expected.shape):
yield actual, self.expected[i].item()
else:
for i in np.ndindex(self.expected.shape):
yield actual[i].item(), self.expected[i].item()
class ApproxMapping(ApproxBase):
"""
Perform approximate comparisons where the expected value is a mapping with
numeric values (the keys can be anything).
"""
def __repr__(self) -> str:
return "approx({!r})".format(
{k: self._approx_scalar(v) for k, v in self.expected.items()}
)
def __eq__(self, actual) -> bool:
if set(actual.keys()) != set(self.expected.keys()):
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
for k in self.expected.keys():
yield actual[k], self.expected[k]
def _check_type(self) -> None:
__tracebackhide__ = True
for key, value in self.expected.items():
if isinstance(value, type(self.expected)):
msg = "pytest.approx() does not support nested dictionaries: key={!r} value={!r}\n full mapping={}"
raise TypeError(msg.format(key, value, pprint.pformat(self.expected)))
elif not isinstance(value, Number):
raise _non_numeric_type_error(self.expected, at="key={!r}".format(key))
class ApproxSequencelike(ApproxBase):
"""
Perform approximate comparisons where the expected value is a sequence of
numbers.
"""
def __repr__(self) -> str:
seq_type = type(self.expected)
if seq_type not in (tuple, list, set):
seq_type = list
return "approx({!r})".format(
seq_type(self._approx_scalar(x) for x in self.expected)
)
def __eq__(self, actual) -> bool:
if len(actual) != len(self.expected):
return False
return ApproxBase.__eq__(self, actual)
def _yield_comparisons(self, actual):
return zip(actual, self.expected)
def _check_type(self) -> None:
__tracebackhide__ = True
for index, x in enumerate(self.expected):
if isinstance(x, type(self.expected)):
msg = "pytest.approx() does not support nested data structures: {!r} at index {}\n full sequence: {}"
raise TypeError(msg.format(x, index, pprint.pformat(self.expected)))
elif not isinstance(x, Number):
raise _non_numeric_type_error(
self.expected, at="index {}".format(index)
)
class ApproxScalar(ApproxBase):
"""
Perform approximate comparisons where the expected value is a single number.
"""
# Using Real should be better than this Union, but not possible yet:
# https://github.com/python/typeshed/pull/3108
DEFAULT_ABSOLUTE_TOLERANCE = 1e-12 # type: Union[float, Decimal]
DEFAULT_RELATIVE_TOLERANCE = 1e-6 # type: Union[float, Decimal]
def __repr__(self) -> str:
"""
Return a string communicating both the expected value and the tolerance
for the comparison being made, e.g. '1.0 ± 1e-6', '(3+4j) ± 5e-6 ∠ ±180°'.
"""
# Infinities aren't compared using tolerances, so don't show a
# tolerance. Need to call abs to handle complex numbers, e.g. (inf + 1j)
if math.isinf(abs(self.expected)):
return str(self.expected)
# If a sensible tolerance can't be calculated, self.tolerance will
# raise a ValueError. In this case, display '???'.
try:
vetted_tolerance = "{:.1e}".format(self.tolerance)
if isinstance(self.expected, complex) and not math.isinf(self.tolerance):
vetted_tolerance += " ∠ ±180°"
except ValueError:
vetted_tolerance = "???"
return "{} ± {}".format(self.expected, vetted_tolerance)
def __eq__(self, actual) -> bool:
"""
Return true if the given value is equal to the expected value within
the pre-specified tolerance.
"""
if _is_numpy_array(actual):
# Call ``__eq__()`` manually to prevent infinite-recursion with
# numpy<1.13. See #3748.
return all(self.__eq__(a) for a in actual.flat)
# Short-circuit exact equality.
if actual == self.expected:
return True
# Allow the user to control whether NaNs are considered equal to each
# other or not. The abs() calls are for compatibility with complex
# numbers.
if math.isnan(abs(self.expected)):
return self.nan_ok and math.isnan(abs(actual))
# Infinity shouldn't be approximately equal to anything but itself, but
# if there's a relative tolerance, it will be infinite and infinity
# will seem approximately equal to everything. The equal-to-itself
# case would have been short circuited above, so here we can just
# return false if the expected value is infinite. The abs() call is
# for compatibility with complex numbers.
if math.isinf(abs(self.expected)):
return False
# Return true if the two numbers are within the tolerance.
result = abs(self.expected - actual) <= self.tolerance # type: bool
return result
# Ignore type because of https://github.com/python/mypy/issues/4266.
__hash__ = None # type: ignore
@property
def tolerance(self):
"""
Return the tolerance for the comparison. This could be either an
absolute tolerance or a relative tolerance, depending on what the user
specified or which would be larger.
"""
def set_default(x, default):
return x if x is not None else default
# Figure out what the absolute tolerance should be. ``self.abs`` is
# either None or a value specified by the user.
absolute_tolerance = set_default(self.abs, self.DEFAULT_ABSOLUTE_TOLERANCE)
if absolute_tolerance < 0:
raise ValueError(
"absolute tolerance can't be negative: {}".format(absolute_tolerance)
)
if math.isnan(absolute_tolerance):
raise ValueError("absolute tolerance can't be NaN.")
# If the user specified an absolute tolerance but not a relative one,
# just return the absolute tolerance.
if self.rel is None:
if self.abs is not None:
return absolute_tolerance
# Figure out what the relative tolerance should be. ``self.rel`` is
# either None or a value specified by the user. This is done after
# we've made sure the user didn't ask for an absolute tolerance only,
# because we don't want to raise errors about the relative tolerance if
# we aren't even going to use it.
relative_tolerance = set_default(
self.rel, self.DEFAULT_RELATIVE_TOLERANCE
) * abs(self.expected)
if relative_tolerance < 0:
            raise ValueError(
                "relative tolerance can't be negative: {}".format(relative_tolerance)
)
if math.isnan(relative_tolerance):
raise ValueError("relative tolerance can't be NaN.")
# Return the larger of the relative and absolute tolerances.
return max(relative_tolerance, absolute_tolerance)
class ApproxDecimal(ApproxScalar):
"""
Perform approximate comparisons where the expected value is a decimal.
"""
DEFAULT_ABSOLUTE_TOLERANCE = Decimal("1e-12")
DEFAULT_RELATIVE_TOLERANCE = Decimal("1e-6")
def approx(expected, rel=None, abs=None, nan_ok: bool = False) -> ApproxBase:
"""
Assert that two numbers (or two sets of numbers) are equal to each other
within some tolerance.
Due to the `intricacies of floating-point arithmetic`__, numbers that we
would intuitively expect to be equal are not always so::
>>> 0.1 + 0.2 == 0.3
False
__ https://docs.python.org/3/tutorial/floatingpoint.html
This problem is commonly encountered when writing tests, e.g. when making
sure that floating-point values are what you expect them to be. One way to
deal with this problem is to assert that two floating-point numbers are
equal to within some appropriate tolerance::
>>> abs((0.1 + 0.2) - 0.3) < 1e-6
True
However, comparisons like this are tedious to write and difficult to
understand. Furthermore, absolute comparisons like the one above are
usually discouraged because there's no tolerance that works well for all
situations. ``1e-6`` is good for numbers around ``1``, but too small for
very big numbers and too big for very small ones. It's better to express
the tolerance as a fraction of the expected value, but relative comparisons
like that are even more difficult to write correctly and concisely.
The ``approx`` class performs floating-point comparisons using a syntax
that's as intuitive as possible::
>>> from pytest import approx
>>> 0.1 + 0.2 == approx(0.3)
True
The same syntax also works for sequences of numbers::
>>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
True
Dictionary *values*::
>>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
True
``numpy`` arrays::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6])) # doctest: +SKIP
True
And for a ``numpy`` array against a scalar::
>>> import numpy as np # doctest: +SKIP
>>> np.array([0.1, 0.2]) + np.array([0.2, 0.1]) == approx(0.3) # doctest: +SKIP
True
By default, ``approx`` considers numbers within a relative tolerance of
``1e-6`` (i.e. one part in a million) of its expected value to be equal.
This treatment would lead to surprising results if the expected value was
``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
To handle this case less surprisingly, ``approx`` also considers numbers
within an absolute tolerance of ``1e-12`` of its expected value to be
equal. Infinity and NaN are special cases. Infinity is only considered
equal to itself, regardless of the relative tolerance. NaN is not
considered equal to anything by default, but you can make it be equal to
itself by setting the ``nan_ok`` argument to True. (This is meant to
facilitate comparing arrays that use NaN to mean "no data".)
Both the relative and absolute tolerances can be changed by passing
arguments to the ``approx`` constructor::
>>> 1.0001 == approx(1)
False
>>> 1.0001 == approx(1, rel=1e-3)
True
>>> 1.0001 == approx(1, abs=1e-3)
True
If you specify ``abs`` but not ``rel``, the comparison will not consider
the relative tolerance at all. In other words, two numbers that are within
the default relative tolerance of ``1e-6`` will still be considered unequal
if they exceed the specified absolute tolerance. If you specify both
``abs`` and ``rel``, the numbers will be considered equal if either
tolerance is met::
>>> 1 + 1e-8 == approx(1)
True
>>> 1 + 1e-8 == approx(1, abs=1e-12)
False
>>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
True
If you're thinking about using ``approx``, then you might want to know how
it compares to other good ways of comparing floating-point numbers. All of
these algorithms are based on relative and absolute tolerances and should
agree for the most part, but they do have meaningful differences:
- ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
tolerance is met. Because the relative tolerance is calculated w.r.t.
both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
``b`` is a "reference value"). You have to specify an absolute tolerance
if you want to compare to ``0.0`` because there is no tolerance by
default. Only available in python>=3.5. `More information...`__
__ https://docs.python.org/3/library/math.html#math.isclose
- ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
      between ``a`` and ``b`` is less than the sum of the relative tolerance
w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance
is only calculated w.r.t. ``b``, this test is asymmetric and you can
think of ``b`` as the reference value. Support for comparing sequences
is provided by ``numpy.allclose``. `More information...`__
__ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
- ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
are within an absolute tolerance of ``1e-7``. No relative tolerance is
considered and the absolute tolerance cannot be changed, so this function
is not appropriate for very large or very small numbers. Also, it's only
available in subclasses of ``unittest.TestCase`` and it's ugly because it
doesn't follow PEP8. `More information...`__
__ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
- ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
Because the relative tolerance is only calculated w.r.t. ``b``, this test
is asymmetric and you can think of ``b`` as the reference value. In the
special case that you explicitly specify an absolute tolerance but not a
relative tolerance, only the absolute tolerance is considered.
.. warning::
.. versionchanged:: 3.2
In order to avoid inconsistent behavior, ``TypeError`` is
raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
The example below illustrates the problem::
assert approx(0.1) > 0.1 + 1e-10 # calls approx(0.1).__gt__(0.1 + 1e-10)
assert 0.1 + 1e-10 > approx(0.1) # calls approx(0.1).__lt__(0.1 + 1e-10)
In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
to be called. But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used to
comparison. This is because the call hierarchy of rich comparisons
follows a fixed behavior. `More information...`__
__ https://docs.python.org/3/reference/datamodel.html#object.__ge__
"""
# Delegate the comparison to a class that knows how to deal with the type
# of the expected value (e.g. int, float, list, dict, numpy.array, etc).
#
# The primary responsibility of these classes is to implement ``__eq__()``
# and ``__repr__()``. The former is used to actually check if some
# "actual" value is equivalent to the given expected value within the
# allowed tolerance. The latter is used to show the user the expected
# value and tolerance, in the case that a test failed.
#
# The actual logic for making approximate comparisons can be found in
# ApproxScalar, which is used to compare individual numbers. All of the
# other Approx classes eventually delegate to this class. The ApproxBase
# class provides some convenient methods and overloads, but isn't really
# essential.
__tracebackhide__ = True
if isinstance(expected, Decimal):
cls = ApproxDecimal # type: Type[ApproxBase]
elif isinstance(expected, Number):
cls = ApproxScalar
elif isinstance(expected, Mapping):
cls = ApproxMapping
elif _is_numpy_array(expected):
cls = ApproxNumpy
elif (
isinstance(expected, Iterable)
and isinstance(expected, Sized)
and not isinstance(expected, STRING_TYPES)
):
cls = ApproxSequencelike
else:
raise _non_numeric_type_error(expected, at=None)
return cls(expected, rel, abs, nan_ok)
def _is_numpy_array(obj: object) -> bool:
"""
Return true if the given object is a numpy array. Make a special effort to
avoid importing numpy unless it's really necessary.
"""
import sys
np = sys.modules.get("numpy") # type: Any
if np is not None:
return isinstance(obj, np.ndarray)
return False
# builtin pytest.raises helper
_E = TypeVar("_E", bound=BaseException)
@overload
def raises(
expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
*,
match: "Optional[Union[str, Pattern]]" = ...
) -> "RaisesContext[_E]":
... # pragma: no cover
@overload # noqa: F811
def raises( # noqa: F811
expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
func: Callable,
*args: Any,
**kwargs: Any
) -> _pytest._code.ExceptionInfo[_E]:
... # pragma: no cover
def raises( # noqa: F811
expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
*args: Any,
**kwargs: Any
) -> Union["RaisesContext[_E]", _pytest._code.ExceptionInfo[_E]]:
r"""
Assert that a code block/function call raises ``expected_exception``
or raise a failure exception otherwise.
:kwparam match: if specified, a string containing a regular expression,
or a regular expression object, that is tested against the string
representation of the exception using ``re.search``. To match a literal
string that may contain `special characters`__, the pattern can
first be escaped with ``re.escape``.
(This is only used when ``pytest.raises`` is used as a context manager,
and passed through to the function otherwise.
When using ``pytest.raises`` as a function, you can use:
``pytest.raises(Exc, func, match="passed on").match("my pattern")``.)
__ https://docs.python.org/3/library/re.html#regular-expression-syntax
.. currentmodule:: _pytest._code
Use ``pytest.raises`` as a context manager, which will capture the exception of the given
type::
>>> with raises(ZeroDivisionError):
... 1/0
If the code block does not raise the expected exception (``ZeroDivisionError`` in the example
above), or no exception at all, the check will fail instead.
You can also use the keyword argument ``match`` to assert that the
exception matches a text or regex::
>>> with raises(ValueError, match='must be 0 or None'):
... raise ValueError("value must be 0 or None")
>>> with raises(ValueError, match=r'must be \d+$'):
... raise ValueError("value must be 42")
The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the
details of the captured exception::
>>> with raises(ValueError) as exc_info:
... raise ValueError("value must be 42")
>>> assert exc_info.type is ValueError
>>> assert exc_info.value.args[0] == "value must be 42"
.. note::
When using ``pytest.raises`` as a context manager, it's worthwhile to
note that normal context manager rules apply and that the exception
raised *must* be the final line in the scope of the context manager.
Lines of code after that, within the scope of the context manager will
not be executed. For example::
>>> value = 15
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
... assert exc_info.type is ValueError # this will not execute
Instead, the following approach must be taken (note the difference in
scope)::
>>> with raises(ValueError) as exc_info:
... if value > 10:
... raise ValueError("value must be <= 10")
...
>>> assert exc_info.type is ValueError
**Using with** ``pytest.mark.parametrize``
When using :ref:`pytest.mark.parametrize ref`
it is possible to parametrize tests such that
some runs raise an exception and others do not.
See :ref:`parametrizing_conditional_raising` for an example.
**Legacy form**
It is possible to specify a callable by passing a to-be-called lambda::
>>> raises(ZeroDivisionError, lambda: 1/0)
<ExceptionInfo ...>
or you can specify an arbitrary callable with arguments::
>>> def f(x): return 1/x
...
>>> raises(ZeroDivisionError, f, 0)
<ExceptionInfo ...>
>>> raises(ZeroDivisionError, f, x=0)
<ExceptionInfo ...>
The form above is fully supported but discouraged for new code because the
context manager form is regarded as more readable and less error-prone.
.. note::
Similar to caught exception objects in Python, explicitly clearing
local references to returned ``ExceptionInfo`` objects can
help the Python interpreter speed up its garbage collection.
Clearing those references breaks a reference cycle
(``ExceptionInfo`` --> caught exception --> frame stack raising
the exception --> current frame stack --> local variables -->
``ExceptionInfo``) which makes Python keep all objects referenced
from that cycle (including all local variables in the current
frame) alive until the next cyclic garbage collection run.
More detailed information can be found in the official Python
documentation for :ref:`the try statement <python:try>`.
"""
__tracebackhide__ = True
for exc in filterfalse(
inspect.isclass, always_iterable(expected_exception, BASE_TYPE)
):
msg = "exceptions must be derived from BaseException, not %s"
raise TypeError(msg % type(exc))
message = "DID NOT RAISE {}".format(expected_exception)
if not args:
match = kwargs.pop("match", None)
if kwargs:
msg = "Unexpected keyword arguments passed to pytest.raises: "
msg += ", ".join(sorted(kwargs))
msg += "\nUse context-manager form instead?"
raise TypeError(msg)
return RaisesContext(expected_exception, message, match)
else:
func = args[0]
if not callable(func):
raise TypeError(
"{!r} object (type: {}) must be callable".format(func, type(func))
)
try:
func(*args[1:], **kwargs)
except expected_exception as e:
# We just caught the exception - there is a traceback.
assert e.__traceback__ is not None
return _pytest._code.ExceptionInfo.from_exc_info(
(type(e), e, e.__traceback__)
)
fail(message)
# This doesn't work with mypy for now. Use fail.Exception instead.
raises.Exception = fail.Exception # type: ignore
class RaisesContext(Generic[_E]):
def __init__(
self,
expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
message: str,
match_expr: Optional[Union[str, "Pattern"]] = None,
) -> None:
self.expected_exception = expected_exception
self.message = message
self.match_expr = match_expr
self.excinfo = None # type: Optional[_pytest._code.ExceptionInfo[_E]]
def __enter__(self) -> _pytest._code.ExceptionInfo[_E]:
self.excinfo = _pytest._code.ExceptionInfo.for_later()
return self.excinfo
def __exit__(
self,
exc_type: Optional["Type[BaseException]"],
exc_val: Optional[BaseException],
exc_tb: Optional[TracebackType],
) -> bool:
__tracebackhide__ = True
if exc_type is None:
fail(self.message)
assert self.excinfo is not None
if not issubclass(exc_type, self.expected_exception):
return False
# Cast to narrow the exception type now that it's verified.
exc_info = cast(
Tuple["Type[_E]", _E, TracebackType], (exc_type, exc_val, exc_tb)
)
self.excinfo.fill_unfilled(exc_info)
if self.match_expr is not None:
self.excinfo.match(self.match_expr)
return True
| JoelMarcey/buck | third-party/py/pytest/src/_pytest/python_api.py | Python | apache-2.0 | 28,612 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('djstripe', '0006_auto_20150602_1934'),
]
operations = [
migrations.AddField(
model_name='customer',
name='card_exp_month',
field=models.PositiveIntegerField(null=True, blank=True),
),
migrations.AddField(
model_name='customer',
name='card_exp_year',
field=models.PositiveIntegerField(null=True, blank=True),
),
]
| mthornhill/dj-stripe | djstripe/migrations/0007_auto_20150625_1243.py | Python | bsd-3-clause | 609 |
# Copyright 2014 Objectif Libre
# Copyright 2015 Dot Hill Systems Corp.
# Copyright 2016 Seagate Technology or one of its affiliates
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import hashlib
import math
import time
from lxml import etree
from oslo_log import log as logging
from oslo_utils import units
import requests
import six
from cinder import coordination
from cinder import exception
from cinder.i18n import _
LOG = logging.getLogger(__name__)
class DotHillClient(object):
def __init__(self, host, login, password, protocol, ssl_verify):
self._mgmt_ip_addrs = list(map(str.strip, host.split(',')))
self._login = login
self._password = password
self._protocol = protocol
self._session_key = None
self.ssl_verify = ssl_verify
self._set_host(self._mgmt_ip_addrs[0])
self._fw = ''
self._driver_name = self.__class__.__name__.split('.')[0]
self._array_name = 'unknown'
self._luns_in_use_by_host = {}
def _set_host(self, ip_addr):
self._curr_ip_addr = ip_addr
self._base_url = "%s://%s/api" % (self._protocol, ip_addr)
def _get_auth_token(self, xml):
"""Parse an XML authentication reply to extract the session key."""
self._session_key = None
try:
tree = etree.XML(xml)
if (tree.findtext(".//PROPERTY[@name='response-type']") ==
"success"):
self._session_key = (
tree.findtext(".//PROPERTY[@name='response']"))
except Exception as e:
msg = _("Cannot parse session key: %s") % e.msg
raise exception.DotHillConnectionError(message=msg)
def login(self):
if self._session_key is None:
return self.session_login()
def session_login(self):
"""Authenticates the service on the device.
Tries all the IP addrs listed in the san_ip parameter
until a working one is found or the list is exhausted.
"""
try:
self._get_session_key()
self.get_firmware_version()
if not self._array_name or self._array_name == 'unknown':
self._array_name = self.get_serial_number()
LOG.debug("Logged in to array %s at %s (session %s)",
self._array_name, self._base_url, self._session_key)
return
except exception.DotHillConnectionError:
not_responding = self._curr_ip_addr
LOG.exception('session_login failed to connect to %s',
self._curr_ip_addr)
# Loop through the remaining management addresses
# to find one that's up.
for host in self._mgmt_ip_addrs:
            if host == not_responding:
continue
self._set_host(host)
try:
self._get_session_key()
return
except exception.DotHillConnectionError:
LOG.error('Failed to connect to %s',
self._curr_ip_addr)
continue
raise exception.DotHillConnectionError(
message=_("Failed to log in to management controller"))
@coordination.synchronized('{self._driver_name}-{self._array_name}')
def _get_session_key(self):
"""Retrieve a session key from the array."""
self._session_key = None
hash_ = "%s_%s" % (self._login, self._password)
if six.PY3:
hash_ = hash_.encode('utf-8')
hash_ = hashlib.md5(hash_)
digest = hash_.hexdigest()
url = self._base_url + "/login/" + digest
try:
xml = requests.get(url, verify=self.ssl_verify, timeout=30)
except requests.exceptions.RequestException:
msg = _("Failed to obtain MC session key")
LOG.exception(msg)
raise exception.DotHillConnectionError(message=msg)
self._get_auth_token(xml.text.encode('utf8'))
LOG.debug("session key = %s", self._session_key)
if self._session_key is None:
raise exception.DotHillAuthenticationError
def _assert_response_ok(self, tree):
"""Parses the XML returned by the device to check the return code.
Raises a DotHillRequestError error if the return code is not 0
or if the return code is None.
"""
# Get the return code for the operation, raising an exception
# if it is not present.
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if not return_code:
raise exception.DotHillRequestError(message="No status found")
# If no error occurred, just return.
if return_code == '0':
return
# Format a message for the status code.
msg = "%s (%s)" % (tree.findtext(".//PROPERTY[@name='response']"),
return_code)
raise exception.DotHillRequestError(message=msg)
def _build_request_url(self, path, *args, **kargs):
url = self._base_url + path
if kargs:
url += '/' + '/'.join(["%s/%s" % (k.replace('_', '-'), v)
for (k, v) in kargs.items()])
if args:
url += '/' + '/'.join(args)
return url
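    # Illustrative example of how the request URL is composed (values are
    # hypothetical):
    #   _build_request_url('/map/volume', 'vol1', lun='5')
    #   -> '<base_url>/map/volume/lun/5/vol1'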
def _request(self, path, *args, **kargs):
"""Performs an API request on the array, with retry.
Propagates a DotHillConnectionError if no valid response is
received from the array, e.g. if the network is down.
Propagates a DotHillRequestError if the device returned a response
but the status is not 0. The device error message will be used
in the exception message.
If the status is OK, returns the XML data for further processing.
"""
tries_left = 2
while tries_left > 0:
try:
return self._api_request(path, *args, **kargs)
except exception.DotHillConnectionError as e:
if tries_left < 1:
LOG.error("Array Connection error: "
"%s (no more retries)", e.msg)
raise
# Retry on any network connection errors, SSL errors, etc
LOG.error("Array Connection error: %s (retrying)", e.msg)
except exception.DotHillRequestError as e:
if tries_left < 1:
LOG.error("Array Request error: %s (no more retries)",
e.msg)
raise
# Retry specific errors which may succeed if we log in again
# -10027 => The user is not recognized on this system.
if '(-10027)' in e.msg:
LOG.error("Array Request error: %s (retrying)", e.msg)
else:
raise
tries_left -= 1
self.session_login()
@coordination.synchronized('{self._driver_name}-{self._array_name}')
def _api_request(self, path, *args, **kargs):
"""Performs an HTTP request on the device, with locking.
Raises a DotHillRequestError if the device returned but the status is
not 0. The device error message will be used in the exception message.
If the status is OK, returns the XML data for further processing.
"""
url = self._build_request_url(path, *args, **kargs)
LOG.debug("Array Request URL: %s (session %s)",
url, self._session_key)
headers = {'dataType': 'api', 'sessionKey': self._session_key}
try:
xml = requests.get(url, headers=headers,
verify=self.ssl_verify, timeout=60)
tree = etree.XML(xml.text.encode('utf8'))
except Exception as e:
message = _("Exception handling URL %(url)s: %(msg)s") % {
'url': url, 'msg': e}
raise exception.DotHillConnectionError(message=message)
if path == "/show/volumecopy-status":
return tree
self._assert_response_ok(tree)
return tree
def logout(self):
pass
def session_logout(self):
url = self._base_url + '/exit'
try:
requests.get(url, verify=self.ssl_verify, timeout=30)
return True
except Exception:
return False
def is_titanium(self):
"""True if array is an older generation."""
return True if len(self._fw) > 0 and self._fw[0] == 'T' else False
def create_volume(self, name, size, backend_name, backend_type):
# NOTE: size is in this format: [0-9]+GiB
path_dict = {'size': size}
if backend_type == "linear":
path_dict['vdisk'] = backend_name
else:
path_dict['pool'] = backend_name
try:
self._request("/create/volume", name, **path_dict)
except exception.DotHillRequestError as e:
# -10186 => The specified name is already in use.
# This can occur during controller failover.
if '(-10186)' in e.msg:
LOG.warning("Ignoring error in create volume: %s", e.msg)
return None
raise
return None
def delete_volume(self, name):
try:
self._request("/delete/volumes", name)
except exception.DotHillRequestError as e:
# -10075 => The specified volume was not found.
# This can occur during controller failover.
if '(-10075)' in e.msg:
LOG.warning("Ignorning error while deleting %(volume)s:"
" %(reason)s",
{'volume': name, 'reason': e.msg})
return
raise
def extend_volume(self, name, added_size):
self._request("/expand/volume", name, size=added_size)
def create_snapshot(self, volume_name, snap_name):
try:
self._request("/create/snapshots", snap_name, volumes=volume_name)
except exception.DotHillRequestError as e:
# -10186 => The specified name is already in use.
# This can occur during controller failover.
if '(-10186)' in e.msg:
LOG.warning("Ignoring error attempting to create snapshot:"
" %s", e.msg)
                return None
            raise
def delete_snapshot(self, snap_name):
try:
self._request("/delete/snapshot", "cleanup", snap_name)
except exception.DotHillRequestError as e:
# -10050 => The volume was not found on this system.
# This can occur during controller failover.
if '(-10050)' in e.msg:
LOG.warning("Ignoring unmap error -10050: %s", e.msg)
return None
raise
def backend_exists(self, backend_name, backend_type):
try:
if backend_type == "linear":
path = "/show/vdisks"
else:
path = "/show/pools"
self._request(path, backend_name)
return True
except exception.DotHillRequestError:
return False
def _get_size(self, size):
return int(math.ceil(float(size) * 512 / (units.G)))
def backend_stats(self, backend_name, backend_type):
stats = {'free_capacity_gb': 0,
'total_capacity_gb': 0}
prop_list = []
if backend_type == "linear":
path = "/show/vdisks"
prop_list = ["size-numeric", "freespace-numeric"]
else:
path = "/show/pools"
prop_list = ["total-size-numeric", "total-avail-numeric"]
tree = self._request(path, backend_name)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[0])
if size:
stats['total_capacity_gb'] = self._get_size(size)
size = tree.findtext(".//PROPERTY[@name='%s']" % prop_list[1])
if size:
stats['free_capacity_gb'] = self._get_size(size)
return stats
def list_luns_for_host(self, host):
tree = self._request("/show/host-maps", host)
return [int(prop.text) for prop in tree.xpath(
"//PROPERTY[@name='lun']")]
def _get_first_available_lun_for_host(self, host):
"""Find next available LUN number.
Returns a lun number greater than 0 which is not known to be in
use between the array and the specified host.
"""
luns = self.list_luns_for_host(host)
self._luns_in_use_by_host[host] = luns
lun = 1
while True:
if lun not in luns:
return lun
lun += 1
def _get_next_available_lun_for_host(self, host, after=0):
# host can be a comma-separated list of WWPNs; we only use the first.
firsthost = host.split(',')[0]
LOG.debug('get_next_available_lun: host=%s, firsthost=%s, after=%d',
host, firsthost, after)
if after == 0:
return self._get_first_available_lun_for_host(firsthost)
luns = self._luns_in_use_by_host[firsthost]
lun = after + 1
while lun < 1024:
LOG.debug('get_next_available_lun: host=%s, trying lun %d',
firsthost, lun)
if lun not in luns:
LOG.debug('get_next_available_lun: host=%s, RETURNING lun %d',
firsthost, lun)
return lun
lun += 1
raise exception.DotHillRequestError(
message=_("No LUNs available for mapping to host %s.") % host)
@coordination.synchronized('{self._driver_name}-{self._array_name}-map')
def map_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
lun = self._get_first_available_lun_for_host(connector['wwpns'][0])
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
host_status = self._check_host(host)
if host_status != 0:
hostname = self._safe_hostname(connector['host'])
try:
self._request("/create/host", hostname, id=host)
except exception.DotHillRequestError as e:
# -10058: The host identifier or nickname is already in use
if '(-10058)' in e.msg:
LOG.error("While trying to create host nickname"
" %(nickname)s: %(error_msg)s",
{'nickname': hostname,
'error_msg': e.msg})
else:
raise
lun = self._get_first_available_lun_for_host(host)
while lun < 255:
try:
self._request("/map/volume",
volume_name,
lun=str(lun),
host=host,
access="rw")
return lun
except exception.DotHillRequestError as e:
                # -3177 => "The specified LUN overlaps a previously defined LUN"
if '(-3177)' in e.msg:
LOG.info("Unable to map volume"
" %(volume_name)s to lun %(lun)d:"
" %(reason)s",
{'volume_name': volume_name,
'lun': lun, 'reason': e.msg})
lun = self._get_next_available_lun_for_host(host,
after=lun)
continue
raise
except Exception as e:
LOG.error("Error while mapping volume"
" %(volume_name)s to lun %(lun)d:",
{'volume_name': volume_name, 'lun': lun},
e)
raise
raise exception.DotHillRequestError(
message=_("Failed to find a free LUN for host %s") % host)
def unmap_volume(self, volume_name, connector, connector_element):
if connector_element == 'wwpns':
host = ",".join(connector['wwpns'])
else:
host = connector['initiator']
try:
self._request("/unmap/volume", volume_name, host=host)
except exception.DotHillRequestError as e:
# -10050 => The volume was not found on this system.
# This can occur during controller failover.
if '(-10050)' in e.msg:
LOG.warning("Ignoring unmap error -10050: %s", e.msg)
return None
raise
def get_active_target_ports(self):
ports = []
tree = self._request("/show/ports")
for obj in tree.xpath("//OBJECT[@basetype='port']"):
port = {prop.get('name'): prop.text
for prop in obj.iter("PROPERTY")
if prop.get('name') in
["port-type", "target-id", "status"]}
if port['status'] == 'Up':
ports.append(port)
return ports
def get_active_fc_target_ports(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "FC"]
def get_active_iscsi_target_iqns(self):
return [port['target-id'] for port in self.get_active_target_ports()
if port['port-type'] == "iSCSI"]
def linear_copy_volume(self, src_name, dest_name, dest_bknd_name):
"""Copy a linear volume."""
self._request("/volumecopy",
dest_name,
dest_vdisk=dest_bknd_name,
source_volume=src_name,
prompt='yes')
# The copy has started; now monitor until the operation completes.
count = 0
while True:
tree = self._request("/show/volumecopy-status")
return_code = tree.findtext(".//PROPERTY[@name='return-code']")
if return_code == '0':
status = tree.findtext(".//PROPERTY[@name='progress']")
progress = False
if status:
progress = True
LOG.debug("Volume copy is in progress: %s", status)
if not progress:
LOG.debug("Volume copy completed: %s", status)
break
else:
if count >= 5:
LOG.error('Error in copying volume: %s', src_name)
raise exception.DotHillRequestError
time.sleep(1)
count += 1
time.sleep(5)
def copy_volume(self, src_name, dest_name, dest_bknd_name,
backend_type='virtual'):
"""Copy a linear or virtual volume."""
if backend_type == 'linear':
return self.linear_copy_volume(src_name, dest_name, dest_bknd_name)
# Copy a virtual volume to another in the same pool.
self._request("/copy/volume", src_name, name=dest_name)
LOG.debug("Volume copy of source_volume: %(src_name)s to "
"destination_volume: %(dest_name)s started.",
{'src_name': src_name, 'dest_name': dest_name, })
# Loop until this volume copy is no longer in progress.
while self.volume_copy_in_progress(src_name):
time.sleep(5)
# Once the copy operation is finished, check to ensure that
# the volume was not deleted because of a subsequent error. An
# exception will be raised if the named volume is not present.
self._request("/show/volumes", dest_name)
LOG.debug("Volume copy of source_volume: %(src_name)s to "
"destination_volume: %(dest_name)s completed.",
{'src_name': src_name, 'dest_name': dest_name, })
def volume_copy_in_progress(self, src_name):
"""Check if a volume copy is in progress for the named volume."""
# 'show volume-copies' always succeeds, even if none in progress.
tree = self._request("/show/volume-copies")
# Find 0 or 1 job(s) with source volume we're interested in
q = "OBJECT[PROPERTY[@name='source-volume']/text()='%s']" % src_name
joblist = tree.xpath(q)
if len(joblist) == 0:
return False
LOG.debug("Volume copy of volume: %(src_name)s is "
"%(pc)s percent completed.",
{'src_name': src_name,
'pc': joblist[0].findtext("PROPERTY[@name='progress']"), })
return True
def _check_host(self, host):
host_status = -1
tree = self._request("/show/hosts")
for prop in tree.xpath("//PROPERTY[@name='host-id' and text()='%s']"
% host):
host_status = 0
return host_status
def _safe_hostname(self, hostname):
"""Modify an initiator name to match firmware requirements.
Initiator name cannot include certain characters and cannot exceed
15 bytes in 'T' firmware (31 bytes in 'G' firmware).
"""
for ch in [',', '"', '\\', '<', '>']:
if ch in hostname:
hostname = hostname.replace(ch, '')
hostname = hostname.replace('.', '_')
name_limit = 15 if self.is_titanium() else 31
index = len(hostname)
if index > name_limit:
index = name_limit
return hostname[:index]
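    # Illustrative example (initiator name is hypothetical):
    #   _safe_hostname('my.host,01') -> 'my_host01'
    #   (disallowed characters are dropped, dots become underscores, and the
    #   result is truncated to the firmware-specific length limit)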
def get_active_iscsi_target_portals(self):
# This function returns {'ip': status,}
portals = {}
prop = 'ip-address'
tree = self._request("/show/ports")
for el in tree.xpath("//PROPERTY[@name='primary-ip-address']"):
prop = 'primary-ip-address'
break
iscsi_ips = [ip.text for ip in tree.xpath(
"//PROPERTY[@name='%s']" % prop)]
if not iscsi_ips:
return portals
for index, port_type in enumerate(tree.xpath(
"//PROPERTY[@name='port-type' and text()='iSCSI']")):
status = port_type.getparent().findtext("PROPERTY[@name='status']")
if status == 'Up':
portals[iscsi_ips[index]] = status
return portals
def get_chap_record(self, initiator_name):
tree = self._request("/show/chap-records")
for prop in tree.xpath("//PROPERTY[@name='initiator-name' and "
"text()='%s']" % initiator_name):
chap_secret = prop.getparent().findtext("PROPERTY[@name='initiator"
"-secret']")
return chap_secret
def create_chap_record(self, initiator_name, chap_secret):
self._request("/create/chap-record",
name=initiator_name,
secret=chap_secret)
def get_serial_number(self):
tree = self._request("/show/system")
return tree.findtext(".//PROPERTY[@name='midplane-serial-number']")
def get_owner_info(self, backend_name, backend_type):
if backend_type == 'linear':
tree = self._request("/show/vdisks", backend_name)
else:
tree = self._request("/show/pools", backend_name)
return tree.findtext(".//PROPERTY[@name='owner']")
def modify_volume_name(self, old_name, new_name):
self._request("/set/volume", old_name, name=new_name)
def get_volume_size(self, volume_name):
tree = self._request("/show/volumes", volume_name)
size = tree.findtext(".//PROPERTY[@name='size-numeric']")
return self._get_size(size)
def get_firmware_version(self):
tree = self._request("/show/controllers")
self._fw = tree.xpath("//PROPERTY[@name='sc-fw']")[0].text
LOG.debug("Array firmware is %s\n", self._fw)
return self._fw
| eharney/cinder | cinder/volume/drivers/dothill/dothill_client.py | Python | apache-2.0 | 24,559 |
import re
import sys
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup # Always prefer setuptools over distutils
def requirements_from_file(filename):
"""Parses a pip requirements file into a list."""
return [line.strip() for line in open(filename, 'r')
if line.strip() and not line.strip().startswith('--')]
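# Illustrative example: for a requirements.txt containing
#   requests>=2.0
#   --extra-index-url https://example.org/simple
#   beautifulsoup4
# requirements_from_file('requirements.txt') returns
# ['requests>=2.0', 'beautifulsoup4'] (blank lines and '--' options are skipped).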
def read(fname, URL, URLImage):
"""Read the content of a file."""
readme = open(path.join(path.dirname(__file__), fname)).read()
if hasattr(readme, 'decode'):
# In Python 3, turn bytes into str.
readme = readme.decode('utf8')
# turn relative links into absolute ones
readme = re.sub(r'`<([^>]*)>`__',
r'`\1 <' + URL + r"/blob/main/\1>`__",
readme)
readme = re.sub(r"\.\. image:: /", ".. image:: " + URLImage + "/", readme)
return readme
here = path.abspath(path.dirname(__file__))
about = {}
with open(path.join(here, 'mechanicalsoup', '__version__.py'),
'r', 'utf-8') as f:
exec(f.read(), about)
# Don't install pytest-runner on every setup.py run, just for tests.
# See https://pypi.org/project/pytest-runner/#conditional-requirement
needs_pytest = {'pytest', 'test', 'ptr'}.intersection(sys.argv)
pytest_runner = ['pytest-runner'] if needs_pytest else []
setup(
name=about['__title__'],
# useful: python setup.py sdist bdist_wheel upload
version=about['__version__'],
description=about['__description__'],
long_description=read('README.rst', about['__github_url__'], about[
'__github_assets_absoluteURL__']),
url=about['__url__'],
license=about['__license__'],
python_requires='>=3.6',
classifiers=[
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3 :: Only',
],
packages=['mechanicalsoup'],
# List run-time dependencies here. These will be installed by pip
# when your project is installed. For an analysis of
# "install_requires" vs pip's requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=requirements_from_file('requirements.txt'),
setup_requires=pytest_runner,
tests_require=requirements_from_file('tests/requirements.txt'),
)
| MechanicalSoup/MechanicalSoup | setup.py | Python | mit | 2,758 |
#!/usr/bin/env python
#=========================================================================
# Copyright (c) 2014 Geoscience Australia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither Geoscience Australia nor the names of its contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#=========================================================================
'''
THREDDSCatalog Class
Created on 22/03/2016
@author: Alex Ip
'''
import os
import re
from collections import OrderedDict
import logging
import subprocess
#from osgeo import gdal, osr
import numpy as np
import netCDF4
from owslib.csw import CatalogueServiceWeb
from owslib.fes import PropertyIsEqualTo # , PropertyIsLike, BBox
from datetime import datetime
import tempfile
import dateutil.parser
from dateutil import tz
import pytz
import yaml
from collections import OrderedDict
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO) # Logging level for this module
# Ignore failed import of URL modules
try:
import urllib
except:
logger.warning(
'WARNING: Unable to import urllib. Any HTML function calls will fail.')
try:
import lxml.html
except:
logger.warning(
'WARNING: Unable to import lxml.html. Any HTML function calls will fail.')
class THREDDSCatalog(object):
'''
Class definition for THREDDSCatalog
'''
# DEFAULT_THREDDS_CATALOGUE_URL = 'http://dapds00.nci.org.au/thredds/catalog.html'
DEFAULT_THREDDS_CATALOGUE_URLS = 'http://dapds00.nci.org.au/thredds/catalog/rr2/National_Coverages/catalog.html,\
http://dapds00.nci.org.au/thredds/catalog/rr2/National_Coverages/http/catalog.html'
def __init__(self, thredds_catalog_urls=None,
yaml_path=None, verbose=False):
'''
Constructor for class THREDDSCatalog
Launches a crawler to examine every THREDDS catalog page underneath the nominated thredds_catalog_urls
'''
assert (yaml_path and not thredds_catalog_urls) or (
thredds_catalog_urls and not yaml_path), 'yaml_path or thredds_catalog_urls should be specified, but not both.'
self.verbose = verbose
if yaml_path:
self.load(yaml_path)
else:
thredds_catalog_urls = thredds_catalog_urls or self.DEFAULT_THREDDS_CATALOGUE_URLS
self.thredds_catalog_url_list = [thredds_catalog_url.strip() for thredds_catalog_url in thredds_catalog_urls.split(',')]
# Top level dict is an OrderedDict to implement prioritised search through specified catalogues
self.thredds_catalog_dict = OrderedDict((thredds_catalog_url, self.get_thredds_dict(thredds_catalog_url))
for thredds_catalog_url in self.thredds_catalog_url_list
)
def get_thredds_dict(self, thredds_catalog_urls):
'''
get_thredds_dict - recursive function to parse specified THREDDS catalogue URL and return a nested dict
Parameter: thredds_catalog_urls - string specifying URL of THREDDS catalog
'''
def get_absolute_url(href):
# Create absolute URL
if href.startswith(
'/'): # Absolute href - should start with "/thredds/"
return re.sub('/thredds/.*', href, thredds_catalog_urls)
else: # Relative href
return re.sub('catalog.html$', href, thredds_catalog_urls)
thredds_catalog_dict = {}
if self.verbose:
logger.info('Opening %s', thredds_catalog_urls)
data = urllib.urlopen(thredds_catalog_urls).read()
logger.debug('data = %s', data)
tree = lxml.html.fromstring(data)
title = tree.find('.//title')
title_text = [e.strip() for e in title.xpath(
'.//text()') if len(e.strip()) > 0][0]
logger.debug('title_text = %s', title_text)
if title_text == 'Catalog Services': # This is a landing page for a file
# Iterate through all service endpoints for file
for ol in tree.iterfind('.//ol'):
for li in ol.iterfind('.//li'):
text = [e.strip() for e in li.xpath(
'.//text()') if len(e.strip()) > 0]
if len(text) == 0: # No li text found
continue
endpoint_type = text[0].replace(':', '')
url = get_absolute_url(text[1])
#======================================================================
# # Not sure why a isn't found
# a = li.find('.//a')
# if a is None:
# logger.debug('a not found')
# continue
#
# href = a.get('href')
# if href is None:
# logger.debug('href not found')
# continue
#
# url = get_absolute_url(href)
#======================================================================
logger.debug(
'Service endpoint: endpoint_type = %s, href = %s', endpoint_type, url)
thredds_catalog_dict[endpoint_type] = url
break # Only process first "<ol>"
else: # Catalog page for virtual subdirectory
for table in tree.iterfind('.//table'):
# first_row = True
for row in table.iterfind('.//tr'):
a = row.find('.//a')
if a is None:
continue
href = a.get('href')
logger.debug('href = %s', href)
if href is None:
continue
url = get_absolute_url(href)
if href.endswith('catalog.html'): # Virtual subdirectory
logger.debug('Virtual subdirectory: url = %s', url)
thredds_catalog_dict[url] = self.get_thredds_dict(url)
elif href.startswith('catalog.html?dataset='): # File
filename = os.path.basename(href)
logger.debug(
'File: filename = %s, url = %s', filename, url)
try:
thredds_catalog_dict[
url] = self.get_thredds_dict(url)
except Exception as e:
logger.error('ERROR: %s', e.message)
# Get rid of empty dicts
if thredds_catalog_dict.get(url) == {}:
del thredds_catalog_dict[url]
logger.debug('thredds_catalog_dict = %s', thredds_catalog_dict)
return thredds_catalog_dict
def dump(self, yaml_path=None):
yaml_path = os.path.abspath(yaml_path or (re.sub('\W', '_', re.sub(
'http://', '', self.thredds_catalog_dict.keys()[0])) + '.yaml'))
yaml_file = open(yaml_path, 'w')
yaml.dump(self.thredds_catalog_dict, yaml_file)
yaml_file.close()
logger.info('THREDDS catalogue dumped to file %s', yaml_path)
def load(self, yaml_path):
yaml_file = open(yaml_path, 'r')
self.thredds_catalog_dict = yaml.load(yaml_file)
yaml_file.close()
logger.info('THREDDS catalogue loaded from file %s', yaml_path)
def endpoint_tuple_list(self, type_filter='.*',
url_filter='.*', catalog_dict=None):
'''
Function to return a list of (protocol, endpoint) tuples contained in the leaf nodes of self.thredds_catalog_dict
Arguments:
type_filter: regular expression string matching one or more of ['HTTPServer', 'NetcdfSubset', OPENDAP', 'WCS, 'WMS']
url_filter: regular expression string to restrict URLs returned: e.g. '.*\.nc$' to return all NetCDF endpoints
'''
result_list = []
catalog_dict = catalog_dict or self.thredds_catalog_dict
for key in sorted(catalog_dict.keys()):
value = catalog_dict[key]
if isinstance(value, dict):
result_list += self.endpoint_tuple_list(
type_filter, url_filter, catalog_dict[key])
else: # Leaf node
if (re.search(type_filter, key)
and re.search(url_filter, value)):
result_list.append((key, value))
return result_list
def endpoint_list(self, type_filter='.*',
url_filter='.*', catalog_dict=None):
'''
Function to return a list of endpoints contained in the leaf nodes of self.thredds_catalog_dict
Arguments:
type_filter: regular expression string matching one or more of ['HTTPServer', 'NetcdfSubset', OPENDAP', 'WCS, 'WMS']
url_filter: regular expression string to restrict URLs returned: e.g. '.*\.nc$' to return all NetCDF endpoints
'''
return [endpoint for _protocol, endpoint in self.endpoint_tuple_list(
type_filter=type_filter, url_filter=url_filter, catalog_dict=catalog_dict)]
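    # Illustrative usage sketch (catalog URL and filters are hypothetical):
    #   catalog = THREDDSCatalog(thredds_catalog_urls='http://example.org/thredds/catalog.html')
    #   nc_urls = catalog.endpoint_list(type_filter='OPENDAP', url_filter='.*\.nc$')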
def find_url_list(self, file_path):
'''
Function to return list of (<protocol>, <url>) tuples for a given filename
Returns empty list for failed match.
N.B: Only *nix supported
'''
# Narrow down search to tuples matching basename
basename = os.path.basename(file_path)
base_list = self.endpoint_tuple_list(
type_filter='.*', url_filter=basename)
if base_list:
logger.debug('%d possible URLs initially found for basename %s', len(
base_list), basename)
else: # Nothing found
logger.debug('No possible URLs found for basename %s', basename)
return []
# Find URL matches for longest possible sub-path
find_path = os.path.abspath(file_path)
find_list = []
while find_path and not find_list:
logger.debug('Searching for %s in URL list' % find_path)
            find_list = [(protocol, url)
                         for protocol, url in base_list if find_path in url]
if find_list: # Matches found for maximum-length sub-path
logger.debug('%d URLs found for %s', len(find_list), find_path)
return find_list # Search complete
elif '/' in find_path: # More leading directories to strip
# Strip leading directory from find_path
find_path = re.sub('^[^/]*/', '', find_path)
else: # Nothing found for basename - should never happen
logger.debug('No URLs found for %s', find_path)
return []
def find_url_dict(self, file_path):
'''
Function to return dict of {<protocol>: <url>, <protocol>: <url>,...} for a given filename
Returns empty dict for failed match, returns first of multiple URLs when duplicate protocols found
'''
# Convert list of tuples to dict - remove duplicate protocols
result_dict = {}
for protocol, url in self.find_url_list(file_path):
existing_url = result_dict.get(protocol)
            # Keep the first URL found when duplicate protocols are found
if existing_url is None:
result_dict[protocol] = url
return result_dict
def find_catalogs(self, file_path, distribution_types=[
'NetcdfSubset'], catalog_dict=None):
'''
        Recursive function to return a list of catalog URLs containing the specified distribution type(s) for the specified file_path
        Returns an empty list for a failed match
'''
basename = os.path.basename(file_path)
base_list = []
catalog_dict = catalog_dict or self.thredds_catalog_dict
for key in sorted(catalog_dict.keys()):
value = catalog_dict[key]
if isinstance(value, dict):
if re.search(
basename + '$', key) and (set(distribution_types) <= set(value.keys())):
base_list.append(key)
else:
base_list += self.find_catalogs(file_path,
distribution_types, value)
if not base_list:
return base_list
# Find URL matches for longest possible sub-path
find_path = os.path.abspath(file_path)
find_list = []
while find_path and not find_list:
logger.debug('Searching for %s in URL list' % find_path)
find_list = [catalog_url for catalog_url in base_list if re.search(
find_path + '$', catalog_url)]
if find_list: # Matches found for maximum-length sub-path
logger.debug('%d URLs found for %s', len(find_list), find_path)
return find_list # Search complete
elif '/' in find_path: # More leading directories to strip
# Strip leading directory from find_path
find_path = re.sub('^[^/]*/', '', find_path)
else: # Nothing found for basename - should never happen
logger.debug('No URLs found for %s', find_path)
return []
def indented_text(self, subtree_dict=None, level=0):
'''
Recursive function to return indented text representing catalog tree
'''
subtree_dict = subtree_dict or self.thredds_catalog_dict
result = ''
key_list = subtree_dict.keys()
if type(subtree_dict) != OrderedDict:
key_list = sorted(key_list)
for key in key_list:
result += (' ' * level) + key
value = subtree_dict[key]
if type(value) == str:
result += ': ' + value + '\n'
elif type(value) == dict:
result += '\n' + self.indented_text(value, level+1)
else:
raise Exception('Unrecognised value type')
return result
| alex-ip/geophys2netcdf | geophys2netcdf/thredds_catalog/__init__.py | Python | apache-2.0 | 15,718 |
# -*- coding: utf-8 -*-
'''
State module to manage Elasticsearch indices
.. versionadded:: 2015.8.0
'''
# Import python libs
from __future__ import absolute_import
import logging
# Import salt libs
log = logging.getLogger(__name__)
def absent(name):
'''
Ensure that the named index is absent
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
index_exists = __salt__['elasticsearch.index_exists'](index=name)
if index_exists:
if __opts__['test']:
ret['comment'] = 'Index {0} will be removed'.format(name)
ret['result'] = None
else:
ret['result'] = __salt__['elasticsearch.index_delete'](index=name)
if ret['result']:
ret['comment'] = 'Removed index {0} successfully'.format(name)
# TODO show pending changes (body)
else:
ret['comment'] = 'Failed to remove index {0}'.format(name) # TODO error handling
elif not index_exists:
ret['comment'] = 'Index {0} is already absent'.format(name)
else:
ret['comment'] = 'Failed to determine whether index {0} is absent, see Minion log for more information'.format(name)
ret['result'] = False
return ret
def present(name, definition):
'''
Ensure that the named index is present
'''
ret = {'name': name, 'changes': {}, 'result': True, 'comment': ''}
    index_exists = __salt__['elasticsearch.index_exists'](index=name)
if not index_exists:
if __opts__['test']:
ret['comment'] = 'Index {0} will be created'.format(name)
ret['result'] = None
else:
ret['result'] = __salt__['elasticsearch.index_create'](index=name, body=definition)
# TODO show pending changes (body)
if ret['result']:
ret['comment'] = 'Created index {0} successfully'.format(name)
elif index_exists:
ret['comment'] = 'Index {0} is already present'.format(name)
else:
ret['comment'] = 'Failed to determine whether index {0} is present, see Minion log for more information'.format(name)
ret['result'] = False
return ret
| smallyear/linuxLearn | salt/salt/states/elasticsearch_index.py | Python | apache-2.0 | 2,188 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('lists', '0005_item_list'),
]
operations = [
migrations.AlterModelOptions(
name='item',
options={'ordering': ('id',)},
),
migrations.AlterUniqueTogether(
name='item',
unique_together=set([('list', 'text')]),
),
]
| gyrodecl/tddsuperlists | lists/migrations/0006_list_item_unique_together.py | Python | gpl-3.0 | 484 |
"""Validators for media app"""
from django.core.exceptions import ValidationError
def validate_unique_url(value):
"""Validate a shortened URL is unique"""
from open_connect.media.models import ShortenedURL
if ShortenedURL.objects.filter(url=value).exists():
raise ValidationError(
            'URLs in shortener must be unique', code='invalid')
| lpatmo/actionify_the_news | open_connect/media/validators.py | Python | mit | 368 |
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
POLICY_ROOT = 'os_compute_api:os-simple-tenant-usage:%s'
simple_tenant_usage_policies = [
policy.RuleDefault(
name=POLICY_ROOT % 'show',
check_str=base.RULE_ADMIN_OR_OWNER),
policy.RuleDefault(
name=POLICY_ROOT % 'list',
check_str=base.RULE_ADMIN_API),
policy.RuleDefault(
name=POLICY_ROOT % 'discoverable',
check_str=base.RULE_ANY),
]
def list_rules():
return simple_tenant_usage_policies
| hanlind/nova | nova/policies/simple_tenant_usage.py | Python | apache-2.0 | 1,162 |
# coding: utf-8
# © 2015 David BEAL @ Akretion
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Project Model to issue',
'version': '8.0.0.5.0',
'category': 'project',
'author': 'Akretion, Odoo Community Association (OCA)',
'depends': [
'project',
],
'website': 'https://www.akretion.com',
'data': [
'project_view.xml',
],
'installable': True,
}
| sl2017/campos | project_model_to_issue/__openerp__.py | Python | agpl-3.0 | 435 |
#! /usr/bin/python
import time
import os
import sys
import cPickle
import Configuration.epistasisconfiguration as configuration
import Gridinterface.migsession as migsession
from Gridinterface.mylogger import log
from misc.jobfragmentation import fragment_epistasis, get_job_specs, create_epistasis_jobs
########### UPDATE / STATUS ###############
def get_epistasis_status(epistasis_jobs):
"""Return the status of the jobs and a progress indicator to be displayed in GUI."""
mig_session.update_jobs(epistasis_jobs)
jobs_done = []
for j in epistasis_jobs:
if j["status"] == "FINISHED":
jobs_done.append(j)
output_files = mig_session.handle_output(j)
for f in output_files:
result_dir = os.path.join(configuration.output_dir,j['results_dir'])
extract_output(f, result_dir)
if len(epistasis_jobs) == len(jobs_done):
log(logfile, 'All jobs completed', debug)
epistasis_status = 'finished'
progress_str = str(len(jobs_done)) + '/'\
+ str(len(epistasis_jobs))
status_lines = create_status_feed(epistasis_jobs)
status_lines.extend(create_status_feed(jobs_done))
status = ""
for line in status_lines:
status += line + '\n'
return status, progress_str
def download_output(j):
output_files = mig_session.handle_output(j)
for f in output_files:
result_dir = os.path.join(configuration.output_dir,j['results_dir'])
extract_output(f, result_dir)
########### UPDATE / STATUS ###############
def update_epistasis(epistasis_jobs):
mig_session.update_jobs(epistasis_jobs)
return epistasis_jobs
def create_status_feed(jobs):
"""Return a status string for each job"""
feed = []
for j in jobs:
line = create_status_str(j)
feed.append(line)
return feed
def create_status_str(job):
"""Return a status string for a job"""
status_str = 'Epistasis \t - class '
for val in job['class']:
status_str += str(val) + ' '
status_str += '-'+ '\t' + job['status']
if job['status'] != "EXECUTING":
status_str += "\t"
status_str += ' \t started:'+job["started"]+'\t \t ended:'+job["finished"]
return status_str
def extract_output(tar_file_path, dest_dir):
import tarfile
    new_dir = os.path.join(dest_dir, os.path.basename(tar_file_path).replace(".tar.gz", ""))
if not os.path.exists(new_dir):
os.makedirs(new_dir)
prog_files = tarfile.open(tar_file_path, "r")
prog_files.extractall(path=new_dir)
prog_files.close()
print "Extracted %s to %s ." % (tar_file_path, new_dir)
# ######### START EPISTASIS ############
def start_epistasis(
selection_variable_values = configuration.default_variable_values,
genelist=configuration.default_gene_list,
traitlist=configuration.default_trait_list,
selection_variable=configuration.default_selection_variable_index,
data=configuration.data_file,
output_dir=configuration.output_dir,
job_size=configuration.default_job_size,
local_mode=False,
):
"""Start the epistasis procedure."""
time_list = time.localtime(time.time())
project_tag = str(time_list[2]) + '_' + str(time_list[1]) + '_'\
+ str(time_list[0]) + '_' + str(time_list[3])\
+ str(time_list[4]) + str(time_list[5])
# create the epistastis jobs
epi_jobs, project_dir = create_epistasis_jobs(
job_size,
genelist=genelist,
traitlist=traitlist,
selection_var=selection_variable,
variable_values=selection_variable_values,
data_file=data,
output_dir=output_dir,
project_tag=project_tag,
run_local=local_mode,
)
main_output_dir = output_dir
# make an output dir
proj_output_dir = os.path.join(output_dir,project_dir)
os.mkdir(proj_output_dir)
# create and submit the epistasis jobs
migjobs = mig_session.create_mig_jobs(epi_jobs)
# create a status file
pklfile_path = os.path.join(configuration.running_jobs_dir, project_tag)+".pkl"
# create a status file dir
if not os.path.exists(configuration.running_jobs_dir):
os.mkdir(configuration.running_jobs_dir)
# save the file
f = open(pklfile_path, "w")
cPickle.dump(epi_jobs, f)
f.close()
print "The gridepistasis has been started. Use \n python gridepistasis.py -u %s \n to get an update of the job." % pklfile_path
return migjobs
########## STOP /CANCEL ##############
def stop_epistasis(jobs):
"""Stop the epistasis procedure."""
mig_session.cancel_jobs(jobs)
###### PRINT ###########
def print_jobs(self, jobs):
"""Print jobs."""
for i in range(len(jobs)):
print 'job ' + str(i) + ' : ' + str(jobs[i])
def print_status(self, jobs):
"""Print job status."""
full_str = []
for j in jobs:
        status_str = 'Job : ' + j['id'] + '\t' + j['status']['STATUS']
print status_str
full_str.append(status_str)
return full_str
# ### CLEAN UP ########
def clean_up_epistasis(jobs):
"""Delete files used in the epistasis procedure that are no longer needed."""
mig_session.clean_up(jobs)
###### MAIN ###############
# Arguments are entered in the order: selectionvariableindex jobsize
local = False
debug = False
if '-local' in sys.argv or '-l' in sys.argv:
local = True
# set up the needed mig job runtime environment
os.putenv("PYTHON", "/usr/bin/python")
os.putenv("R_HOME", "/usr/lib/R")
if '-debug' in sys.argv or '-d' in sys.argv:
debug = True
logfile = configuration.output_dir+configuration.logfile_name
mig_session = migsession.Migsession(configuration.output_dir, logfile, local, debug)
if __name__ == '__main__':
if '-u' in sys.argv :
i = sys.argv.index("-u")
pkl_file = open(sys.argv[i+1], "r+")
jobs = cPickle.load(pkl_file)
print get_epistasis_status(jobs)
cPickle.dump(jobs, pkl_file)
pkl_file.close()
else:
start_epistasis(local_mode=local)
| heromod/migrid | user-projects/EpistasisOnGrid/gridepistasis.py | Python | gpl-2.0 | 6,112 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2014 The ProteinDF development team.
# see also AUTHORS and README if provided.
#
# This file is a part of the ProteinDF software package.
#
# The ProteinDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The ProteinDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ProteinDF. If not, see <http://www.gnu.org/licenses/>.
"""
compare ProteinDF results
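Typical invocation (file names are illustrative):
    python pdf-test-h5.py result1.h5 result2.h5 --compare-energy --compare-pop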
"""
import sys
import os.path
import argparse
import proteindf_bridge as bridge
import proteindf_tools as pdf
import logging
import logging.config
def get_OK_NG(yn):
if yn:
return "OK"
else:
return "NG"
def main():
parser = argparse.ArgumentParser(description='compare ProteinDF results')
parser.add_argument('FILE1',
nargs=1,
help='ProteinDF parameter file1')
parser.add_argument('FILE2',
nargs=1,
help='ProteinDF parameter file2')
parser.add_argument('-v', '--verbose',
action='store_true',
default=False)
parser.add_argument('-d', '--debug',
action='store_true',
default=False)
parser.add_argument('--compare-info',
action='store_true',
help='check info',
default=False)
parser.add_argument('--compare-energy',
action='store_true',
help='check TE',
default=False)
parser.add_argument('--compare-pop',
action='store_true',
help='check population',
default=False)
parser.add_argument('--compare-grad',
action='store_true',
help='check gradient',
default=False)
args = parser.parse_args()
verbose = args.verbose
debug = args.debug
compare_info = args.compare_info
compare_energy = args.compare_energy
compare_pop = args.compare_pop
compare_grad = args.compare_grad
    # If all checks are turned off, enable the info and energy checks by default
if (compare_info | compare_energy | compare_pop | compare_grad) == 0:
compare_info = True
compare_energy = True
# setup logger
if args.debug:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.debug('check items')
logger.debug(' info: {}'.format(compare_info))
logger.debug(' TE: {}'.format(compare_energy))
logger.debug(' pop: {}'.format(compare_pop))
logger.debug(' grad: {}'.format(compare_grad))
path1 = args.FILE1[0]
path2 = args.FILE2[0]
if not os.path.isfile(path1):
sys.exit('file not found: %s' % (path1))
if not os.path.isfile(path2):
sys.exit('file not found: %s' % (path2))
data1 = pdf.PdfParam_H5()
data1.open(path1)
data2 = pdf.PdfParam_H5()
data2.open(path2)
# check ------------------------------------------------------------
answer = True
if compare_info:
judge = data1.compare_info(data2)
logger.info('check info: {}'.format(get_OK_NG(judge)))
answer = answer & judge
if compare_energy:
judge = data1.compare_energy(data2)
logger.info('check TE: {}'.format(get_OK_NG(judge)))
answer = answer & judge
if compare_pop:
judge = data1.compare_pop(data2)
logger.info('check pop: {}'.format(get_OK_NG(judge)))
answer = answer & judge
if compare_grad:
judge = data1.compare_grad(data2)
logger.info('check grad: {}'.format(get_OK_NG(judge)))
answer = answer & judge
# output -----------------------------------------------------------
if answer:
logger.info('check OK')
sys.exit(0)
else:
logger.error('ProteinDF results are not consistent.')
sys.exit(1)
if __name__ == '__main__':
if os.path.exists("config.ini"):
logging.config.fileConfig("config.ini",
disable_existing_loggers=False)
main()
| ProteinDF/ProteinDF_pytools | scripts/pdf-test-h5.py | Python | gpl-3.0 | 4,651 |
from vsg.vhdlFile.extract import utils
from vsg.vhdlFile.extract import tokens
def get_line_which_includes_tokens(lTokens, lAllTokens, oTokenMap):
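    """
    Return a list of token objects, one per occurrence of the given tokens,
    each covering the full line (bounded by the surrounding carriage returns)
    in which the occurrence appears.
    """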
lReturn = []
lTokenIndexes = utils.get_indexes_of_token_list(lTokens, oTokenMap)
for iIndex in lTokenIndexes:
iStart = oTokenMap.get_index_of_carriage_return_before_index(iIndex)
if iStart is None:
iStart = 0
else:
iStart += 1
iEnd = oTokenMap.get_index_of_carriage_return_after_index(iIndex)
iLine = oTokenMap.get_line_number_of_index(iIndex)
lTemp = lAllTokens[iStart:iEnd]
oTokens = tokens.New(iStart, iLine, lTemp)
oTokens.token_index = iIndex - iStart
lReturn.append(oTokens)
return lReturn
| jeremiah-c-leary/vhdl-style-guide | vsg/vhdlFile/extract/get_line_which_includes_tokens.py | Python | gpl-3.0 | 767 |
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=too-many-lines, unused-argument
"""numpy ndarray and util functions."""
try:
from __builtin__ import all as py_all
from __builtin__ import slice as py_slice
except ImportError:
from builtins import all as py_all
from builtins import slice as py_slice
from array import array as native_array
import functools
import ctypes
import warnings
import numpy as _np
from .. import _deferred_compute as dc
from ..autograd import is_recording
from ..ndarray import NDArray, _DTYPE_NP_TO_MX, _GRAD_REQ_MAP
from ..ndarray import indexing_key_expand_implicit_axes, get_indexing_dispatch_code,\
get_oshape_of_gather_nd_op
from ..ndarray._internal import _set_np_ndarray_class
from . import _op as _mx_np_op
from ..base import check_call, _LIB, NDArrayHandle, c_array, mx_int, mx_int64
from ..base import mx_real_t, c_array_buf, mx_uint, numeric_types, integer_types
from ..runtime import Features
from ..context import Context
from ..util import set_module, wrap_np_unary_func, wrap_np_binary_func,\
is_np_default_dtype
from ..context import current_context
from ..ndarray import numpy as _mx_nd_np
from ..ndarray.numpy import _internal as _npi
from ..ndarray.ndarray import _storage_type
from ..dlpack import ndarray_from_numpy
from .utils import _get_np_op
from .fallback import * # pylint: disable=wildcard-import,unused-wildcard-import
from . import fallback
__all__ = ['ndarray', 'empty', 'empty_like', 'array', 'shape', 'median',
'zeros', 'zeros_like', 'ones', 'ones_like', 'full', 'full_like', 'all', 'any', 'broadcast_to',
'add', 'subtract', 'multiply', 'divide', 'mod', 'remainder', 'fmod', 'power', 'bitwise_not',
'delete', 'trace', 'transpose', 'copy', 'moveaxis', 'reshape', 'dot',
'arctan2', 'sin', 'cos', 'tan', 'sinh', 'cosh', 'tanh', 'log10', 'invert',
'sqrt', 'cbrt', 'abs', 'absolute', 'fabs', 'exp', 'expm1', 'arcsin', 'arccos', 'arctan', 'sign', 'log',
'degrees', 'log2', 'log1p', 'rint', 'radians', 'reciprocal', 'square', 'negative', 'histogram',
'fix', 'ceil', 'floor', 'trunc', 'logical_not', 'arcsinh', 'arccosh', 'arctanh', 'append', 'argsort',
'sort', 'tensordot', 'eye', 'linspace', 'logspace', 'expand_dims', 'tile', 'arange',
'array_split', 'split', 'hsplit', 'vsplit', 'dsplit', 'flatnonzero', 'tril_indices',
'concatenate', 'stack', 'vstack', 'row_stack', 'column_stack', 'hstack', 'dstack',
'average', 'mean', 'maximum', 'fmax', 'minimum', 'fmin', 'amax', 'amin', 'max', 'min',
'swapaxes', 'clip', 'argmax', 'argmin', 'std', 'var', 'insert',
'indices', 'copysign', 'ravel', 'unravel_index', 'diag_indices_from', 'hanning', 'hamming', 'blackman',
'logical_and', 'logical_or', 'logical_xor',
'flip', 'flipud', 'fliplr', 'around', 'round', 'round_', 'arctan2', 'hypot',
'triu_indices_from', 'triu_indices', 'tri',
'bitwise_and', 'bitwise_xor', 'bitwise_or', 'rad2deg', 'deg2rad',
'unique', 'lcm', 'tril', 'triu', 'identity', 'take', 'ldexp', 'vdot', 'inner', 'outer',
'cross', 'kron', 'equal', 'not_equal', 'interp',
'greater', 'less', 'greater_equal', 'less_equal', 'roll', 'rot90', 'einsum', 'true_divide', 'nonzero',
'quantile', 'percentile', 'shares_memory', 'may_share_memory', 'diff', 'ediff1d', 'resize', 'matmul',
'nan_to_num', 'isnan', 'isinf', 'isposinf', 'isneginf', 'isfinite', 'polyval', 'where', 'bincount',
'atleast_1d', 'atleast_2d', 'atleast_3d', 'fill_diagonal', 'squeeze',
'diagflat', 'repeat', 'prod', 'pad', 'cumsum', 'sum', 'rollaxis', 'diag', 'diagonal']
__all__ += fallback.__all__
# Return code for dispatching indexing function call
_NDARRAY_UNSUPPORTED_INDEXING = -1
_NDARRAY_BASIC_INDEXING = 0
_NDARRAY_ADVANCED_INDEXING = 1
_NDARRAY_EMPTY_TUPLE_INDEXING = 2
# Return code for 0-d boolean array handler
_NDARRAY_NO_ZERO_DIM_BOOL_ARRAY = -1
_NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE = 0
_NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE = 1
_SIGNED_INT32_UPPER_LIMIT = (2**31 - 1)
# Caching whether MXNet was built with INT64 support or not
_INT64_TENSOR_SIZE_ENABLED = None
def _int64_enabled():
global _INT64_TENSOR_SIZE_ENABLED
if _INT64_TENSOR_SIZE_ENABLED is None:
_INT64_TENSOR_SIZE_ENABLED = Features().is_enabled('INT64_TENSOR_SIZE')
return _INT64_TENSOR_SIZE_ENABLED
# This function is copied from ndarray.py since pylint
# keeps giving false alarm error of undefined-all-variable
def _new_alloc_handle(shape, ctx, delay_alloc, dtype=mx_real_t): # pylint: disable=redefined-outer-name
"""Return a new handle with specified shape and context.
Empty handle is only used to hold results.
Returns
-------
handle
A new empty `ndarray` handle.
"""
hdl = NDArrayHandle()
if _int64_enabled():
check_call(_LIB.MXNDArrayCreate64(
c_array_buf(mx_int64, native_array('q', shape)),
ctypes.c_int(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[_np.dtype(dtype).type])),
ctypes.byref(hdl)))
else:
# When shape is larger than uint32 then there is an overflow error at python end itself.
# It needs to be caught here since the call doesn't even reach backend.
array_size = 1
for idx in shape:
array_size = array_size * idx
if array_size > _SIGNED_INT32_UPPER_LIMIT:
raise Exception("[_new_alloc_handle] Size of tensor you are trying to allocate is " +
"larger than 2^31 elements. Please build with flag " +
"USE_INT64_TENSOR_SIZE=1")
if _np.dtype(dtype) == _np.dtype([('bfloat16', _np.uint16)]):
dtype_type = _np.dtype(dtype)
else:
dtype_type = _np.dtype(dtype).type
check_call(_LIB.MXNDArrayCreate(
c_array_buf(mx_uint, native_array('I', shape)),
mx_uint(len(shape)),
ctypes.c_int(ctx.device_typeid),
ctypes.c_int(ctx.device_id),
ctypes.c_int(int(delay_alloc)),
ctypes.c_int(int(_DTYPE_NP_TO_MX[dtype_type])),
ctypes.byref(hdl)))
return hdl
def _reshape_view(a, *shape): # pylint: disable=redefined-outer-name
"""Returns a **view** of this array with a new shape without altering any data.
Parameters
----------
shape : tuple of int, or n ints
The new shape should not change the array size, namely
``np.prod(new_shape)`` should be equal to ``np.prod(a.shape)``.
Some dimensions of the shape can take special value -1, which
infers the dimension of the output shape by using the remainder of the
input dimensions keeping the size of the new array same as that of the input array.
At most one dimension of shape can be -1.
Returns
-------
ndarray
An array with desired shape that shares data with this array.
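    Examples
    --------
    A small illustrative sketch, assuming ``np`` is ``mxnet.numpy``::
        >>> a = np.arange(6).reshape(2, 3)
        >>> _reshape_view(a, 3, -1).shape
        (3, 2)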
"""
if len(shape) == 1 and isinstance(shape[0], (list, tuple)):
shape = shape[0]
handle = NDArrayHandle()
check_call(_LIB.MXNDArrayReshape64(a.handle,
len(shape),
c_array(ctypes.c_int64, shape),
False,
ctypes.byref(handle)))
return ndarray(handle=handle, writable=a.writable)
def _as_mx_np_array(object, ctx=None, zero_copy=False):
"""Convert arrays or any array member of container to mxnet.numpy.ndarray on ctx."""
if object is None or isinstance(object, ndarray):
return object
elif isinstance(object, _np.ndarray):
from_numpy = ndarray_from_numpy(ndarray, array)
return from_numpy(object, zero_copy and object.flags['C_CONTIGUOUS'])
elif isinstance(object, (integer_types, numeric_types)):
return object
elif isinstance(object, (_np.bool_, _np.bool)):
return array(object, dtype=_np.bool_, ctx=ctx)
elif isinstance(object, (list, tuple)):
tmp = [_as_mx_np_array(arr, ctx=ctx, zero_copy=zero_copy) for arr in object]
return object.__class__(tmp)
else:
raise TypeError('Does not support converting {} to mx.np.ndarray.'.format(str(type(object))))
def _as_onp_array(object):
"""Convert object to mxnet.numpy.ndarray."""
cur_ctx = None
if isinstance(object, ndarray):
return object.asnumpy(), object.ctx
elif isinstance(object, (list, tuple)):
tmp = []
for arr in object:
arr, tmp_ctx = _as_onp_array(arr)
# if isinstance(arr, (list, tuple)):
# raise TypeError('type {} not supported'.format(str(type(arr))))
tmp.append(arr)
if cur_ctx is None:
cur_ctx = tmp_ctx
elif tmp_ctx is not None and cur_ctx != tmp_ctx:
                raise ValueError('Ambiguous to set the context for the output ndarray since'
                                 ' input ndarrays are allocated on different devices: {} and {}'
                                 .format(cur_ctx, tmp_ctx))
return object.__class__(tmp), cur_ctx
else:
return object, cur_ctx
# Have to use 0 as default value for stype since pylint does not allow
# importing _STORAGE_TYPE_DEFAULT from ndarray.py.
def _np_ndarray_cls(handle, writable=True, stype=0):
if stype == -1:
stype = _storage_type(handle)
if stype != 0:
raise ValueError('_np_ndarray_cls currently only supports default storage '
'type, while received stype = {}'.format(stype))
return ndarray(handle, writable=writable)
_set_np_ndarray_class(_np_ndarray_cls)
_NUMPY_ARRAY_FUNCTION_DICT = {}
_NUMPY_ARRAY_UFUNC_DICT = {}
_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD = {}
_FALLBACK_ARRAY_UFUNC_WARNED_RECORD = {}
def wrap_mxnp_np_ufunc(func):
"""
A convenience decorator for wrapping for python overload-able ops to provide type
casting for mixed use of mx_np and onp inputs.
Parameters
----------
func : a python overload-able binary function to be wrapped for type casting.
Returns
-------
Function
        A function wrapped with type casting.
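    Examples
    --------
    A minimal sketch of the effect (assuming ``multiply`` is one of the wrapped binary ops):
    >>> import numpy as onp
    >>> a = np.ones((2,))          # mxnet.numpy.ndarray
    >>> b = onp.ones((2,))         # official NumPy array
    >>> type(a * b)                # the onp operand is cast to mx_np before dispatch
    <class 'mxnet.numpy.ndarray'>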
"""
@functools.wraps(func)
def _wrap_mxnp_np_ufunc(x1, x2):
if isinstance(x2, _np.ndarray):
x2 = _as_mx_np_array(x2, ctx=x1.ctx)
return func(x1, x2)
return _wrap_mxnp_np_ufunc
@set_module('mxnet.numpy')
class ndarray(NDArray): # pylint: disable=invalid-name
"""
ndarray(handle, writable=True):
An array object represents a multidimensional, homogeneous array of fixed-size items.
An associated data-type object describes the format of each element in the array
(its byte-order, how many bytes it occupies in memory, whether it is an integer, a
floating point number, or something else, etc.). Arrays should be constructed using
    `array`, `zeros` or `empty` (refer to the See Also section below). Currently,
    only c-contiguous arrays are supported. The parameters given here refer to
a low-level method (`ndarray(...)`) for instantiating an array.
For more information, refer to the `mxnet.numpy` module and examine the
methods and attributes of an array.
Parameters
----------
handle: int
The ndarray handle in backend (C++).
writable: bool
Indicates whether inplace-assignment is allowed for the array.
Attributes
----------
T : ndarray
Transpose of the array.
dtype : dtype object
Describes the format of the elements in the array.
size : int
Number of elements in the array.
ndim : int
The array's number of dimensions.
shape : tuple of ints
Shape of the array.
See Also
--------
array : Construct an array.
zeros : Create an array, each element of which is zero.
empty : Create an array, but leave its allocated memory unchanged (i.e.,
it contains "garbage").
"""
@staticmethod
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy unary/binary operator calls on mxnet.numpy.ndarray
to this function. The operators must comply with the ufunc definition in NumPy.
The following code is adapted from CuPy.
Casting rules for operator with mx_np and onp (inplace op will keep its type)
| Expression | a type | b type | out type|
| --- | --- | --- | --- |
| `a += b` | onp | mx_np | onp |
| `a += b` | mx_np | onp | mx_np |
| `c = a + b` | onp | mx_np | mx_np |
| `c = a + b` | mx_np | onp | mx_np |
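        A short sketch of the rules above (the result types are the point, not the values):
        >>> import numpy as onp
        >>> a = np.ones((2,))    # mx_np
        >>> b = onp.ones((2,))   # onp
        >>> type(a + b)          # mixed expression yields mx_np
        <class 'mxnet.numpy.ndarray'>
        >>> type(b + a)          # NumPy defers to this __array_ufunc__, result is mx_np
        <class 'mxnet.numpy.ndarray'>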
"""
ufunc_list = ["add", "subtract", "multiply", "divide", "true_divide", "floor_divide", "power",
"remainder", "bitwise_and", "bitwise_or", "bitwise_xor", "left_shift", "right_shift",
"greater", "greater_equal", "less", "less_equal", "not_equal", "equal", "matmul"]
if 'out' in kwargs:
# need to unfold tuple argument in kwargs
out = kwargs['out']
if len(out) != 1:
raise ValueError('The `out` parameter must have exactly one ndarray')
kwargs['out'] = out[0]
if method == '__call__':
name = ufunc.__name__
mx_ufunc = _NUMPY_ARRAY_UFUNC_DICT.get(name, None)
onp_op = _get_np_op(name)
if mx_ufunc is None:
# try to fallback to official NumPy op
if is_recording():
raise ValueError("Falling back to NumPy operator {} with autograd active is not supported."
"Please consider moving the operator to the outside of the autograd scope.")\
.format(name)
new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]
if onp_op not in _FALLBACK_ARRAY_UFUNC_WARNED_RECORD:
import logging
logging.warning("np.%s is a fallback operator, "
"which is actually using official numpy's implementation", name)
_FALLBACK_ARRAY_UFUNC_WARNED_RECORD[onp_op] = True
out = onp_op(*new_inputs, **kwargs)
return _as_mx_np_array(out, ctx=inputs[0].ctx)
            # binary ops mixing an official NumPy (onp) array with mx_np
elif name in ufunc_list and isinstance(inputs[0], _np.ndarray):
# inplace
if 'out' in kwargs:
new_inputs = [arg.asnumpy() if isinstance(arg, ndarray) else arg for arg in inputs]
return onp_op(*new_inputs, **kwargs)
else:
new_inputs = [_as_mx_np_array(arg, ctx=inputs[1].ctx)
if isinstance(arg, _np.ndarray) else arg for arg in inputs]
return mx_ufunc(*new_inputs, **kwargs)
else:
return mx_ufunc(*inputs, **kwargs)
else:
return NotImplemented
@staticmethod
def __array_function__(self, func, types, args, kwargs): # pylint: disable=bad-staticmethod-argument
"""
Dispatch official NumPy operators that comply with the array function protocol to
this function.
"""
mx_np_func = _NUMPY_ARRAY_FUNCTION_DICT.get(func, None)
func_name = func.__name__
if mx_np_func is None:
# try to fallback to official NumPy op
if is_recording():
raise ValueError("Falling back to NumPy operator {} with autograd active is not supported."
"Please consider moving the operator to the outside of the autograd scope.")\
.format(func)
new_args, cur_ctx = _as_onp_array(args)
if cur_ctx is None:
raise ValueError('Unknown context for the input ndarrays. It is probably a bug. Please'
' create an issue on GitHub.')
new_kwargs = {}
for k, v in kwargs.items():
new_kwargs[k] = v.asnumpy() if isinstance(v, ndarray) else v
if func not in _FALLBACK_ARRAY_FUNCTION_WARNED_RECORD:
import logging
logging.warning("np.%s is a fallback operator, "
"which is actually using official numpy's implementation.", func_name)
_FALLBACK_ARRAY_FUNCTION_WARNED_RECORD[func] = True
out = func(*new_args, **new_kwargs)
return _as_mx_np_array(out, ctx=cur_ctx)
else:
if py_all(issubclass(t, ndarray) for t in types):
return mx_np_func(*args, **kwargs)
else:
try:
cur_ctx = next(a.ctx for a in args if hasattr(a, 'ctx'))
except StopIteration:
cur_ctx = next(a.ctx for a in kwargs.values() if hasattr(a, 'ctx'))
new_args = _as_mx_np_array(args, ctx=cur_ctx,
zero_copy=func_name in {'may_share_memory', 'shares_memory'})
new_kwargs = {k: _as_mx_np_array(v, cur_ctx) for k, v in kwargs.items()}
return mx_np_func(*new_args, **new_kwargs)
def _get_np_basic_indexing(self, key):
"""
This function indexes ``self`` with a tuple of `slice` objects only.
"""
key_nd = tuple(idx for idx in key if idx is not None)
if len(key_nd) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(key_nd))
)
if len(key_nd) > self.ndim:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(key_nd), self.ndim)
)
none_axes = [ax for ax in range(len(key)) if key[ax] is None] # pylint: disable=invalid-name
slc_key, int_axes = self._basic_indexing_key_int_to_slice(key_nd)
new_axes = self._new_axes_after_basic_indexing(none_axes, key)
# Check bounds for integer axes
for ax in int_axes: # pylint: disable=invalid-name
if not -self.shape[ax] <= key_nd[ax] < self.shape[ax]:
raise IndexError(
'index {} is out of bounds for axis {} with size {}'
''.format(key_nd[ax], ax, self.shape[ax]))
if self._basic_indexing_slice_is_contiguous(slc_key, self.shape):
# Create a shared-memory view by using low-level flat slicing
flat_begin, flat_end = self._basic_indexing_contiguous_flat_begin_end(
slc_key, self.shape
)
handle = NDArrayHandle()
flat_self = self.reshape_view(-1)
if _int64_enabled():
check_call(
_LIB.MXNDArraySlice64(
flat_self.handle,
ctypes.c_int64(flat_begin),
ctypes.c_int64(flat_end),
ctypes.byref(handle),
)
)
else:
check_call(
_LIB.MXNDArraySlice(
flat_self.handle,
ctypes.c_uint32(flat_begin),
ctypes.c_uint32(flat_end),
ctypes.byref(handle),
)
)
sliced_shape = self._basic_indexing_sliced_shape(slc_key, self.shape)
sliced = self.__class__(handle=handle, writable=self.writable)
if 0 in sliced_shape:
sliced = sliced.reshape(sliced_shape)
else:
sliced = sliced.reshape_view(sliced_shape)
else:
begin, end, step = self._basic_indexing_key_to_begin_end_step(
slc_key, self.shape, keep_none=True
)
sliced = _npi.slice(self, begin, end, step)
# Reshape to final shape due to integer and `None` entries in `key`.
final_shape = [sliced.shape[i] for i in range(sliced.ndim) if i not in int_axes]
for ax in new_axes: # pylint: disable=invalid-name
final_shape.insert(ax, 1)
if sliced.size == 0:
return sliced.reshape(tuple(final_shape))
else:
return sliced.reshape_view(tuple(final_shape))
def _get_np_empty_tuple_indexing(self, key):
new_shape = []
num_none = 0
for i, idx in enumerate(key):
if idx is None:
new_shape.append(1) # expand dimension
num_none += 1
elif idx == ():
new_shape.append(0) # 0 shape
elif idx == slice(None, None, None):
new_shape.append(self.shape[i - num_none])
return empty(new_shape, dtype=self.dtype)
def _get_np_advanced_indexing(self, key):
idcs, new_axes = self._get_index_nd(key)
if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck
idcs = idcs.as_np_ndarray()
else:
idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
sliced = _npi.gather_nd(self, idcs)
# Reshape due to `None` entries in `key`.
if new_axes:
final_shape = [sliced.shape[i] for i in range(sliced.ndim)]
for ax in new_axes: # pylint: disable=invalid-name
final_shape.insert(ax, 1)
return sliced.reshape(tuple(final_shape))
else:
return sliced
def _set_np_advanced_indexing(self, key, value):
"""This function is called by __setitem__ when key is an advanced index."""
idcs, new_axes = self._get_index_nd(key)
if type(idcs) == NDArray: # pylint: disable=unidiomatic-typecheck
idcs = idcs.as_np_ndarray()
else:
idcs = _npi.stack(*[i if isinstance(i, self.__class__) else i.as_np_ndarray() for i in idcs])
vshape = get_oshape_of_gather_nd_op(self.shape, idcs.shape)
value_nd = self._prepare_value_nd(value, bcast_shape=vshape, squeeze_axes=new_axes)
self._scatter_set_nd(value_nd, idcs)
# pylint: disable=redefined-outer-name
def _get_np_boolean_indexing(self, key, ndim, shape):
"""
        There are two types of boolean indices (which are mostly equivalent).
        This function handles a single boolean index directly for higher speed.
        Otherwise, the boolean index is expanded into (multiple) integer array
        indices and handled by advanced indexing.
"""
key_shape = key.shape
key_ndim = len(key_shape)
if ndim < key_ndim:
raise IndexError('too many indices, whose ndim = {}, for array with ndim = {}'
.format(key_ndim, ndim))
for i in range(key_ndim):
if key_shape[i] != shape[i]:
raise IndexError('boolean index did not match indexed array along dimension {};'
' dimension is {} but corresponding boolean dimension is {}'
.format(i, shape[i], key_shape[i]))
remaining_dims = shape[key_ndim:]
data = _reshape_view(self, -1, *remaining_dims)
key = _reshape_view(key, -1)
return _reshape_view(_npi.boolean_mask(data, key), -1, *remaining_dims)
def _set_np_boolean_indexing(self, key, value):
"""
        There are two types of boolean indices (which are mostly equivalent).
        This function handles a single boolean assignment directly for higher speed.
        Otherwise, the boolean index is expanded into (multiple) integer array
        indices and handled by advanced assignment.
"""
if isinstance(value, numeric_types):
_npi.boolean_mask_assign_scalar(data=self, mask=key,
value=int(value) if isinstance(value, bool) else value,
start_axis=0, out=self)
elif isinstance(value, ndarray):
_npi.boolean_mask_assign_tensor(data=self, mask=key, value=value, start_axis=0, out=self)
else:
raise NotImplementedError('type %s is not supported.'%(type(value)))
# pylint: disable=too-many-return-statements
def __getitem__(self, key):
"""Return self[key].
Returns a sliced view of this array if the elements fetched are contiguous in memory;
otherwise, returns a newly created NDArray.
        This function supports advanced indexing as defined in the reference below,
        with some restrictions: boolean indexing is supported only for a single
        boolean ndarray used as a key, and mixing a boolean ndarray with other index
        types is not supported in ``advanced`` indexing.
For basic indexing, i.e., if ``key`` consists only of integers,
``slice``, ``Ellipsis`` (``...``) and ``None``, a mutable view is
returned that shares memory with this array if the accessed portion is
contiguous in memory.
Otherwise, a newly created ``ndarray`` is returned.
        This function supports advanced indexing as defined in `the NumPy
advanced indexing documentation
<https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_.
Parameters
----------
key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types
Indexing key.
Examples
--------
The default is to give explicit indices for all axes:
>>> x = np.arange(6).reshape(2, 3)
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> x[0, :2]
array([0., 1.])
>>> x[:, :-1]
array([[0., 1.],
[3., 4.]])
If fewer indices are given, they are automatically supplemented by an
appropriate number of ``slice(None)`` ("``:``") to the right. For
instance, a single integer indexes along the first axis:
>>> x[0]
array([0., 1., 2.])
>>> x[1:]
array([[3., 4., 5.]])
To omit a range of axes that should be kept as-is, an `Ellipsis`
("``...``") can be used:
>>> x = np.arange(16).reshape(2, 2, 2, 2)
>>> x[0, ..., 1]
array([[1., 3.],
[5., 7.]])
>>> x[0, :, :, 1] # equivalent
array([[1., 3.],
[5., 7.]])
New axes of length 1 can be created by inserting ``None``
(`numpy.newaxis`) in the index:
>>> x = np.arange(6).reshape(2, 3)
>>> x[None, :, :]
array([[[0., 1., 2.],
[3., 4., 5.]]])
>>> x[None, :, :].shape
(1, 2, 3)
If the indexed portion of the array is contiguous in memory, no data
is copied. Instead, a shared-memory view of the original array is
returned, and changes to that view affect the original array:
>>> x = np.arange(8).reshape(2, 2, 2)
>>> y = x[0] # contiguous
>>> y
array([[0., 1.],
[2., 3.]])
>>> y[:] = -1
>>> x
array([[[-1., -1.],
[-1., -1.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> x = np.arange(8).reshape(2, 2, 2)
>>> y = x[1, :1, :] # contiguous
>>> y
array([[4., 5.]])
>>> y[:] = -1
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[-1., -1.],
[ 6., 7.]]])
>>> x = np.arange(0, 8).reshape(2, 2, 2)
>>> y = x[:, :, 1] # not contiguous
>>> y
array([[1., 3.],
[5., 7.]])
>>> y[:] = -1
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
If the indexing key contains `list`, `numpy.ndarray` or `NDArray`
objects, advanced indexing is triggered, which always returns a
copy:
>>> x = np.arange(8).reshape(2, 2, 2)
>>> x[[0, 1]]
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> x[[0, 1], :] # equivalent
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> y = np.array([0, 1], dtype='int32')
>>> x[1:, y]
array([[[4., 5.],
[6., 7.]]])
>>> y = np.array([0, 1], dtype='int32')
>>> x[1:, y]
array([[[4., 5.],
[6., 7.]]])
Get negative elements in an ndarray through boolean array indexing
>>> x = np.array([1., -1., -2., 3])
>>> x[x < 0]
array([-1., -2.])
        For more information related to boolean indexing, please refer to
https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.
"""
ndim = self.ndim # pylint: disable=redefined-outer-name
shape = self.shape # pylint: disable=redefined-outer-name
if isinstance(key, bool): # otherwise will be treated as 0 and 1
            key = array(key, dtype=_np.bool_, ctx=self.ctx)
if isinstance(key, list):
try:
new_key = _np.array(key)
if new_key.dtype == _np.bool_:
key = new_key
except Exception as err:
raise TypeError('{}'.format(str(err)))
if isinstance(key, _np.ndarray):
if dc.is_deferred_compute():
raise TypeError('Indexing with a numpy array is not supported in HybridBlock.')
if key.dtype == _np.bool_:
key = array(key, dtype='bool', ctx=self.ctx)
# Handle single boolean index of matching dimensionality and size first for higher speed
        # If the boolean array is mixed with other indices, it is instead expanded into (multiple)
        # integer array indices and will be handled by advanced indexing.
        # This comes before the ndim == 0 check since it also handles the 0-dim case.
if isinstance(key, ndarray) and key.dtype == _np.bool_:
return self._get_np_boolean_indexing(key, ndim, shape)
all = __builtins__['all'] # `def all` below shadows the all builtin
if ndim == 0 and key != ():
raise IndexError('scalar tensor can only accept `()` as index')
# Handle simple cases for higher speed
if isinstance(key, tuple) and len(key) == 0:
return self
if isinstance(key, tuple) and len(key) == ndim\
and py_all(isinstance(idx, integer_types) for idx in key):
out = self
for idx in key:
out = out[idx]
return out
if isinstance(key, integer_types):
# Equivalent to isinstance(key, integer_types) case in numpy/_symbol.py
if key > shape[0] - 1:
raise IndexError(
'index {} is out of bounds for axis 0 with size {}'.format(
key, shape[0]))
return self._at(key)
elif isinstance(key, py_slice):
            # Unlike numpy/_symbol.py, this returns a writable, memory-sharing slice
            # (via self._slice) when key.step is None or 1. Otherwise it is equivalent
            # to the isinstance(key, py_slice) case in _symbol.py.
if key.step is None or key.step == 1:
if key.start is not None or key.stop is not None:
return self._slice(key.start, key.stop)
else:
return self
elif key.step != 0:
start = [None] if key.start is None else key.start
stop = [None] if key.stop is None else key.stop
return _npi.slice(self, start, stop, key.step)
else:
raise ValueError("slice step cannot be zero")
elif isinstance(key, tuple) and \
all((isinstance(arr, NDArray) and _np.issubdtype(arr.dtype, _np.integer) and \
arr.ndim > 0) for arr in key):
# Equivalent case in numpy/_symbol.py
return _npi.advanced_indexing_multiple(self, _npi.stack(*key))
elif isinstance(key, tuple) and dc.is_deferred_compute():
# Equivalent to isinstance(key, tuple) case in numpy/_symbol.py
# Only enabled in deferred compute mode, as this codepath prevents
# memory sharing which may be desired in non-deferred compute
# imperative mode.
begin = []
end = []
step = []
new_shape = ()
            assert len(key)  # len(key) == 0 is handled above
unsupported = False
for index in key:
if isinstance(index, py_slice):
if index.step is not None and index.step == 0:
raise ValueError("slice step cannot be zero")
begin.append(index.start)
end.append(index.stop)
step.append(index.step)
new_shape += (-2,)
elif isinstance(index, integer_types):
if index >= 0:
begin.append(index)
end.append(index+1)
step.append(1)
else:
begin.append(index)
end.append(index - 1)
step.append(-1)
new_shape += (-3,)
else:
unsupported = True
break
if not unsupported:
new_shape += (-4,)
sliced = _npi.slice(self, begin, end, step)
return _npi.reshape(sliced, new_shape)
# Special handling for cases only supported in imperative mode
if dc.is_deferred_compute():
raise TypeError('The type of indexing used is not supported in HybridBlock.')
# For 0-d boolean indices: A new axis is added,
# but at the same time no axis is "used". So if we have True,
# we add a new axis (a bit like with np.newaxis). If it is
# False, we add a new axis, but this axis has 0 entries.
# prepend is defined to handle this case.
# prepend = _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
        # prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means a zero-size dim must be expanded
# prepend = _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be prepended
key, prepend = indexing_key_expand_implicit_axes(key, self.shape)
indexing_dispatch_code = get_indexing_dispatch_code(key)
if indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:
# won't be affected by zero-dim boolean indices
return self._get_np_empty_tuple_indexing(key)
elif indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return empty((0,) + self._get_np_basic_indexing(key).shape,
dtype=self.dtype, ctx=self.ctx)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:
key = (_np.newaxis,) + key
return self._get_np_basic_indexing(key)
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
                return empty((0,) + self._get_np_advanced_indexing(key).shape,
dtype=self.dtype, ctx=self.ctx)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE:
key = (_np.newaxis,) + key
return self._get_np_advanced_indexing(key)
else:
raise RuntimeError
# pylint: disable=inconsistent-return-statements
def __setitem__(self, key, value):
"""Sets ``self[key]`` to ``value``.
        This function supports advanced indexing as defined in `the NumPy
        advanced indexing documentation
        <https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing>`_,
        with the restriction that boolean array indexing is supported only for a single
        boolean ndarray used as the whole key.
Parameters
----------
key : int, slice, list, np.ndarray, mx.np.ndarray, or tuple of all previous types
The indexing key.
value : scalar or array-like object that can be broadcast to the shape of self[key]
The value to set.
Examples
--------
>>> x = np.zeros((2, 3))
>>> x[:] = 1
>>> x
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
>>> x[:, 1:2] = 2
>>> x
array([[ 1., 2., 1.],
[ 1., 2., 1.]])
>>> x[1:2, 1:] = 3
>>> x
array([[ 1., 2., 1.],
[ 1., 3., 3.]])
>>> x[1:, 0:2] = np.zeros((1, 2))
>>> x
array([[ 1., 2., 1.],
[ 0., 0., 3.]])
>>> x[1, 2] = 4
>>> x
array([[ 1., 2., 1.],
[ 0., 0., 4.]])
>>> x[[0], [1, 2]] = 5
>>> x
array([[ 1., 5., 5.],
[ 0., 0., 4.]])
>>> x[::-1, 0:2:2] = [6]
>>> x
array([[ 6., 5., 5.],
[ 6., 0., 4.]])
        For information related to boolean indexing, please refer to
https://docs.scipy.org/doc/numpy-1.17.0/reference/arrays.indexing.html.
"""
if isinstance(value, NDArray) and not isinstance(value, ndarray):
raise TypeError('Cannot assign mx.nd.NDArray to mxnet.numpy.ndarray')
if isinstance(key, bool): # otherwise will be treated as 0 and 1
            key = array(key, dtype=_np.bool_)
# Handle single boolean assign of matching dimensionality and size first for higher speed
        # If the boolean array is mixed with other indices, it is instead expanded into (multiple)
        # integer array indices and will be handled by advanced assign.
        # This comes before the ndim == 0 check since it also handles the 0-dim case.
        if isinstance(key, ndarray) and key.dtype == _np.bool_:
return self._set_np_boolean_indexing(key, value)
# handle basic and advanced indexing
if self.ndim == 0:
if not isinstance(key, tuple) or len(key) != 0:
raise IndexError('scalar tensor can only accept `()` as index')
if isinstance(value, numeric_types):
self._full(value)
elif isinstance(value, ndarray) and value.size == 1:
if value.shape != self.shape:
value = value.reshape(self.shape)
value.copyto(self)
elif isinstance(value, (_np.ndarray, _np.generic)) and value.size == 1:
if isinstance(value, _np.generic) or value.shape != self.shape:
value = value.reshape(self.shape)
self._sync_copyfrom(value)
else:
raise ValueError('setting an array element with a sequence.')
else:
# For 0-d boolean indices: A new axis is added,
# but at the same time no axis is "used". So if we have True,
# we add a new axis (a bit like with np.newaxis). If it is
# False, we add a new axis, but this axis has 0 entries.
# prepend is defined to handle this case.
# prepend == _NDARRAY_NO_ZERO_DIM_BOOL_ARRAY/-1 means there is no 0-d boolean scalar
            # prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE/0 means a zero-size dim must be expanded
            # prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_TRUE/1 means a new axis must be prepended
# prepend actually has no influence on __setitem__
key, prepend = indexing_key_expand_implicit_axes(key, self.shape)
if prepend == _NDARRAY_ZERO_DIM_BOOL_ARRAY_FALSE:
return # no action is needed
slc_key = tuple(idx for idx in key if idx is not None)
if len(slc_key) < self.ndim:
raise RuntimeError(
'too few indices after normalization: expected `ndim` ({}) '
'but got {}. This is a bug, please report it!'
''.format(self.ndim, len(slc_key))
)
if len(slc_key) > self.ndim and self.ndim != 0:
raise IndexError(
'too many indices ({}) for array with {} dimensions'
''.format(len(slc_key), self.ndim)
)
indexing_dispatch_code = get_indexing_dispatch_code(slc_key)
if indexing_dispatch_code == _NDARRAY_BASIC_INDEXING:
                self._set_nd_basic_indexing(key, value)  # inherited from the NDArray class
elif indexing_dispatch_code == _NDARRAY_EMPTY_TUPLE_INDEXING:
pass # no action needed
elif indexing_dispatch_code == _NDARRAY_ADVANCED_INDEXING:
self._set_np_advanced_indexing(key, value)
else:
raise ValueError(
'Indexing NDArray with index {} of type {} is not supported'
''.format(key, type(key))
)
def _prepare_value_nd(self, value, bcast_shape, squeeze_axes=None):
"""Return a broadcast `ndarray` with same context and dtype as ``self``.
        For item assignment, the returned `ndarray` is squeezed according to squeeze_axes, since
        value_nd is assigned to the not-yet-expanded space in the original array.
`value`: numeric types or array like.
`bcast_shape`: a shape tuple.
`squeeze_axes`: a sequence of axes to squeeze in the value array.
        Note: mxnet.numpy.ndarray does not support NDArray as an assigned value.
"""
if isinstance(value, numeric_types):
value_nd = full(bcast_shape, value, ctx=self.ctx, dtype=self.dtype)
elif isinstance(value, self.__class__):
value_nd = value.as_in_ctx(self.ctx)
if value_nd.dtype != self.dtype:
value_nd = value_nd.astype(self.dtype)
else:
try:
value_nd = array(value, ctx=self.ctx, dtype=self.dtype)
            except Exception:
raise TypeError('mxnet.np.ndarray does not support assignment with non-array-like '
'object {} of type {}'.format(value, type(value)))
# For advanced indexing setitem, if there is None in indices, we need to squeeze the
# assigned value_nd since None is also ignored in slicing the original array.
if squeeze_axes and value_nd.ndim > len(bcast_shape):
squeeze_axes = tuple([ax for ax in squeeze_axes if ax < len(value_nd.shape)])
value_nd = value_nd.squeeze(axis=tuple(squeeze_axes))
# handle the cases like the following
# a = np.zeros((3, 3)), b = np.ones((1, 1, 1, 1, 3)), a[0] = b
# b cannot broadcast directly to a[0].shape unless its leading 1-size axes are trimmed
if value_nd.ndim > len(bcast_shape):
squeeze_axes = []
for i in range(value_nd.ndim - len(bcast_shape)):
if value_nd.shape[i] == 1:
squeeze_axes.append(i)
else:
break
if squeeze_axes:
value_nd = value_nd.squeeze(squeeze_axes)
if value_nd.shape != bcast_shape:
if value_nd.size == 0:
value_nd = value_nd.reshape(bcast_shape)
else:
value_nd = value_nd.broadcast_to(bcast_shape)
return value_nd
@wrap_mxnp_np_ufunc
def __add__(self, other):
"""x.__add__(y) <=> x + y"""
return add(self, other)
@wrap_mxnp_np_ufunc
def __iadd__(self, other):
"""x.__iadd__(y) <=> x += y"""
if not self.writable:
raise ValueError('trying to add to a readonly ndarray')
return add(self, other, out=self)
def __invert__(self):
"""x.__invert__() <=> ~x"""
return invert(self)
@wrap_mxnp_np_ufunc
def __and__(self, other):
"""x.__and__(y) <=> x & y"""
return bitwise_and(self, other)
@wrap_mxnp_np_ufunc
def __or__(self, other):
"""x.__or__(y) <=> x | y"""
return bitwise_or(self, other)
@wrap_mxnp_np_ufunc
def __xor__(self, other):
"""x.__xor__(y) <=> x ^ y"""
return bitwise_xor(self, other)
@wrap_mxnp_np_ufunc
def __iand__(self, other):
"""x.__iand__(y) <=> x &= y"""
return bitwise_and(self, other, out=self)
@wrap_mxnp_np_ufunc
def __ior__(self, other):
r"""x.__ior__(y) <=> x \|= y"""
return bitwise_or(self, other, out=self)
@wrap_mxnp_np_ufunc
def __ixor__(self, other):
"""x.__ixor__(y) <=> x ^= y"""
return bitwise_xor(self, other, out=self)
def __round__(self, n=0):
"""x.__round__(n)"""
return round(self, decimals=n)
def __abs__(self):
"""x.__abs__()"""
return absolute(self)
def __ceil__(self):
"""x.__ceil__()"""
return ceil(self)
def __floor__(self):
"""x.__floor__()"""
return floor(self)
def __trunc__(self):
"""x.__trunc__()"""
return trunc(self)
@wrap_mxnp_np_ufunc
def __sub__(self, other):
"""x.__sub__(y) <=> x - y"""
return subtract(self, other)
@wrap_mxnp_np_ufunc
def __isub__(self, other):
"""x.__isub__(y) <=> x -= y"""
if not self.writable:
raise ValueError('trying to subtract from a readonly ndarray')
return subtract(self, other, out=self)
@wrap_mxnp_np_ufunc
def __rsub__(self, other):
"""x.__rsub__(y) <=> y - x"""
return subtract(other, self)
@wrap_mxnp_np_ufunc
def __mul__(self, other):
"""x.__mul__(y) <=> x * y"""
return multiply(self, other)
def __neg__(self):
return negative(self)
@wrap_mxnp_np_ufunc
def __imul__(self, other):
r"""x.__imul__(y) <=> x \*= y"""
if not self.writable:
            raise ValueError('trying to multiply a readonly ndarray')
return multiply(self, other, out=self)
@wrap_mxnp_np_ufunc
def __rmul__(self, other):
"""x.__rmul__(y) <=> y * x"""
return self.__mul__(other)
@wrap_mxnp_np_ufunc
def __div__(self, other):
"""x.__div__(y) <=> x / y"""
return divide(self, other)
@wrap_mxnp_np_ufunc
def __rdiv__(self, other):
"""x.__rdiv__(y) <=> y / x"""
return divide(other, self)
@wrap_mxnp_np_ufunc
def __idiv__(self, other):
"""x.__idiv__(y) <=> x /= y"""
return divide(self, other, out=self)
@wrap_mxnp_np_ufunc
def __truediv__(self, other):
"""x.__truediv__(y) <=> x / y"""
return divide(self, other)
@wrap_mxnp_np_ufunc
def __rtruediv__(self, other):
"""x.__rtruediv__(y) <=> y / x"""
return divide(other, self)
@wrap_mxnp_np_ufunc
def __itruediv__(self, other):
"""x.__itruediv__(y) <=> x /= y"""
return divide(self, other, out=self)
@wrap_mxnp_np_ufunc
def __mod__(self, other):
"""x.__mod__(y) <=> x % y"""
return mod(self, other)
@wrap_mxnp_np_ufunc
def __rmod__(self, other):
"""x.__rmod__(y) <=> y % x"""
return mod(other, self)
@wrap_mxnp_np_ufunc
def __imod__(self, other):
"""x.__imod__(y) <=> x %= y"""
return mod(self, other, out=self)
@wrap_mxnp_np_ufunc
def __pow__(self, other):
"""x.__pow__(y) <=> x ** y"""
return power(self, other)
@wrap_mxnp_np_ufunc
def __rpow__(self, other):
"""x.__rpow__(y) <=> y ** x"""
return power(other, self)
@wrap_mxnp_np_ufunc
def __eq__(self, other):
"""x.__eq__(y) <=> x == y"""
return equal(self, other)
def __hash__(self):
raise NotImplementedError
@wrap_mxnp_np_ufunc
def __ne__(self, other):
"""x.__ne__(y) <=> x != y"""
return not_equal(self, other)
@wrap_mxnp_np_ufunc
def __gt__(self, other):
"""x.__gt__(y) <=> x > y"""
return greater(self, other)
@wrap_mxnp_np_ufunc
def __ge__(self, other):
"""x.__ge__(y) <=> x >= y"""
return greater_equal(self, other)
@wrap_mxnp_np_ufunc
def __lt__(self, other):
"""x.__lt__(y) <=> x < y"""
return less(self, other)
@wrap_mxnp_np_ufunc
def __le__(self, other):
"""x.__le__(y) <=> x <= y"""
return less_equal(self, other)
@wrap_mxnp_np_ufunc
def __matmul__(self, other):
"""x.__matmul__(y) <=> x @ y"""
return matmul(self, other)
@wrap_mxnp_np_ufunc
def __rmatmul__(self, other):
"""x.__rmatmul__(y) <=> y @ x"""
return matmul(other, self)
@wrap_mxnp_np_ufunc
def __imatmul__(self, other):
"""x.__imatmul__(y) <=> x @= y"""
return matmul(self, other, out=self)
def __bool__(self):
num_elements = self.size
if num_elements == 0:
warnings.simplefilter('default')
warnings.warn('The truth value of an empty array is ambiguous. Returning False, but in'
' future this will result in an error.', DeprecationWarning)
return False
elif num_elements == 1:
return bool(self.item())
else:
raise ValueError("The truth value of an ndarray with multiple elements is ambiguous.")
__nonzero__ = __bool__
def __float__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return float(self.item())
def __int__(self):
num_elements = self.size
if num_elements != 1:
raise TypeError('only size-1 arrays can be converted to Python scalars')
return int(self.item())
def __len__(self):
"""Number of elements along the first axis."""
shape = self.shape # pylint: disable=redefined-outer-name
if len(shape) == 0:
raise TypeError('len() of unsized object')
return self.shape[0]
def __reduce__(self):
return ndarray, (None,), self.__getstate__()
def item(self, *args):
"""Copy an element of an array to a standard Python scalar and return it.
Parameters
----------
*args : Arguments (variable number and type)
none: in this case, the method only works for arrays with one element (a.size == 1),
which element is copied into a standard Python scalar object and returned.
int_type: this argument is interpreted as a flat index into the array, specifying which
element to copy and return.
tuple of int_types: functions as does a single int_type argument, except that the
argument is interpreted as an nd-index into the array.
Returns
-------
z : Standard Python scalar object
A copy of the specified element of the array as a suitable Python scalar.
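        Examples
        --------
        An illustrative example (elements are returned as Python floats here):
        >>> x = np.array([[1., 2.], [3., 4.]])
        >>> x.item(3)        # flat index into the array
        4.0
        >>> x.item((1, 0))   # nd-index into the array
        3.0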
"""
# TODO(junwu): no need to call asnumpy() on the whole array.
return self.asnumpy().item(*args)
def nonzero(self):
"""Return the indices of the elements that are non-zero.
Refer to `numpy.nonzero` for full documentation.
See Also
--------
numpy.nonzero : equivalent function
"""
return nonzero(self)
@property
# pylint: disable= invalid-name, undefined-variable
def T(self):
"""Same as self.transpose(). This always returns a copy of self."""
return self.transpose()
# pylint: enable= invalid-name, undefined-variable
def all(self, axis=None, out=None, keepdims=False):
return _mx_nd_np.all(self, axis=axis, out=out, keepdims=keepdims)
def any(self, axis=None, out=None, keepdims=False):
return _mx_nd_np.any(self, axis=axis, out=out, keepdims=keepdims)
def as_nd_ndarray(self):
"""Convert mxnet.numpy.ndarray to mxnet.ndarray.NDArray to use its fluent methods."""
hdl = NDArrayHandle()
check_call(_LIB.MXShallowCopyNDArray(self.handle, ctypes.byref(hdl)))
return NDArray(handle=hdl, writable=self.writable)
def as_np_ndarray(self):
"""A convenience function for creating a numpy ndarray from the current ndarray
with zero copy. For this class, it just returns itself since it's already a
numpy ndarray."""
return self
def __repr__(self):
"""
Returns a string representation of the array.
        The dtype of the ndarray will be appended if it differs from the current default dtype.
The context of the ndarray will be appended for devices other than CPU.
Examples
--------
>>> from mxnet import np, npx
>>> a = np.random.uniform(size=(2, 3))
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]])
>>> print(a)
[[0.5488135 0.5928446 0.71518934]
[0.84426576 0.60276335 0.8579456 ]]
>>> a.dtype
dtype('float32')
>>> npx.set_np_float64()
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]], dtype=float32)
>>> npx.set_np_float64(default_float64=False)
>>> a
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]])
>>> b = a.astype(np.float64)
>>> b
array([[0.54881352, 0.59284461, 0.71518934],
[0.84426576, 0.60276335, 0.85794562]], dtype=float64)
>>> print(b)
[[0.54881352 0.59284461 0.71518934]
[0.84426576 0.60276335 0.85794562]]
>>> b.dtype
dtype('float64')
>>> c = a.copyto(npx.gpu(0))
>>> c
array([[0.5488135 , 0.5928446 , 0.71518934],
[0.84426576, 0.60276335, 0.8579456 ]], ctx=gpu(0))
>>> print(c)
[[0.5488135 0.5928446 0.71518934]
[0.84426576 0.60276335 0.8579456 ]] @gpu(0)
>>> d = b.copyto(npx.gpu(0))
>>> d
array([[0.54881352, 0.59284461, 0.71518934],
[0.84426576, 0.60276335, 0.85794562]], dtype=float64, ctx=gpu(0))
>>> print(d)
[[0.54881352 0.59284461 0.71518934]
[0.84426576 0.60276335 0.85794562]] @gpu(0)
"""
if self._alive:
array_str = self.asnumpy().__repr__()
dtype = self.dtype
default_dtype = _np.float64 if is_np_default_dtype() else _np.float32
if 'dtype=' in array_str:
if dtype == default_dtype:
array_str = array_str[:array_str.rindex(',')] + ')'
elif dtype not in (default_dtype, _np.bool_):
array_str = array_str[:-1] + ', dtype={})'.format(dtype)
context = self.ctx
if context.device_type == 'cpu':
return array_str
return array_str[:-1] + ', ctx={})'.format(str(context))
else:
return '<FREED {}>'.format(self.__class__.__name__)
def __str__(self):
"""Returns a string representation of the array."""
array_str = self.asnumpy().__str__()
context = self.ctx
if context.device_type == 'cpu' or self.ndim == 0:
return array_str
return '{array} @{ctx}'.format(array=array_str, ctx=context)
def __format__(self, fmt):
"""Return value.__format__(format_spec). Overwrite to include 0-d array"""
if self.ndim == 0:
return self.item().__format__(fmt)
elif len(fmt) == 0:
return self.__str__().__format__(fmt)
else:
raise TypeError("Cannot format mxnet.numpy.ndarray with format_spec")
def attach_grad(self, grad_req='write'): # pylint: disable=arguments-differ
"""Attach a gradient buffer to this ndarray, so that `backward`
can compute gradient with respect to it.
Parameters
----------
grad_req : {'write', 'add', 'null'}
How gradient will be accumulated.
* 'write': gradient will be overwritten on every backward.
* 'add': gradient will be added to existing value on every backward.
* 'null': do not compute gradient for this NDArray.
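        Examples
        --------
        A minimal sketch of the intended usage (assumes ``mxnet.autograd`` is available):
        >>> from mxnet import autograd
        >>> x = np.ones((2,))
        >>> x.attach_grad()
        >>> with autograd.record():
        ...     y = (x * x).sum()
        >>> y.backward()
        >>> x.grad
        array([2., 2.])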
"""
grad = _mx_nd_np.zeros_like(self) # pylint: disable=undefined-variable
grad_req = _GRAD_REQ_MAP[grad_req]
check_call(_LIB.MXAutogradMarkVariables(
1, ctypes.pointer(self.handle),
ctypes.pointer(mx_uint(grad_req)),
ctypes.pointer(grad.handle)))
@property
def grad(self):
"""Returns gradient buffer attached to this ndarray."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayGetGrad(self.handle, ctypes.byref(hdl)))
if hdl.value is None:
return None
return _np_ndarray_cls(hdl)
def detach(self):
"""Returns a new ndarray, detached from the current graph."""
hdl = NDArrayHandle()
check_call(_LIB.MXNDArrayDetach(self.handle, ctypes.byref(hdl)))
return _np_ndarray_cls(hdl)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True): # pylint: disable=arguments-differ,unused-argument, too-many-arguments
"""
Copy of the array, cast to a specified type.
Parameters
----------
dtype : str or dtype
Typecode or data-type to which the array is cast.
order : {'C', 'F', 'A', 'K'}, optional
Controls the memory layout order of the result.
'C' means C order, 'F' means Fortran order, 'A'
means 'F' order if all the arrays are Fortran contiguous,
'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
Controls what kind of data casting may occur. Defaults to 'unsafe'
for backwards compatibility.
* 'no' means the data types should not be cast at all.
* 'equiv' means only byte-order changes are allowed.
* 'safe' means only casts which can preserve values are allowed.
* 'same_kind' means only safe casts or casts within a kind,
like float64 to float32, are allowed.
* 'unsafe' means any data conversions may be done.
subok : bool, optional
If True, then sub-classes will be passed-through (default), otherwise
the returned array will be forced to be a base-class array.
copy : bool, optional
Default `True`. By default, astype always returns a newly
allocated ndarray on the same context. If this is set to
`False`, and the dtype requested is the same as the ndarray's
dtype, the ndarray is returned instead of a copy.
Returns
-------
arr_t : ndarray
Unless `copy` is False and the other conditions for returning the input
array are satisfied (see description for `copy` input parameter), `arr_t`
is a new array of the same shape as the input array with `dtype`.
Notes
-----
This function differs from the official `ndarray`'s ``astype`` function in the following
aspects:
* `order` only supports 'C' and 'K'.
* `casting` only supports 'unsafe'.
* `subok` only supports ``True``.
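        Examples
        --------
        A small illustration of the copy semantics described above:
        >>> x = np.zeros((2,), dtype='float32')
        >>> x.astype('int32').dtype
        dtype('int32')
        >>> x.astype(x.dtype, copy=False) is x   # no copy when the dtype already matches
        True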
"""
if order is not None and order != 'K' and order != 'C':
raise ValueError('order must be either \'K\' or \'C\'')
if casting != 'unsafe':
raise ValueError('casting must be equal to \'unsafe\'')
if not subok:
raise ValueError('subok must be equal to True')
if dtype is None:
dtype = _np.float32
if not copy and _np.dtype(dtype) == self.dtype:
return self
return _npi.cast(self, dtype=dtype)
def copyto(self, other):
"""Copies the value of this array to another array.
If ``other`` is a ``ndarray`` object, then ``other.shape`` and
``self.shape`` should be the same. This function copies the value from
``self`` to ``other``.
If ``other`` is a context, a new ``np.ndarray`` will be first created on
the target context, and the value of ``self`` is copied.
Parameters
----------
other : ndarray or Context
The destination array or context.
Returns
-------
out: ndarray
The copied array. If ``other`` is an ``ndarray``, then the return value
and ``other`` will point to the same ``ndarray``.
Examples
--------
>>> x = np.ones((2, 3))
>>> y = np.zeros((2, 3), ctx=npx.gpu(0))
>>> z = x.copyto(y)
>>> z is y
True
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if isinstance(other, ndarray):
if other.handle is self.handle:
warnings.warn('You are attempting to copy an array to itself', RuntimeWarning)
return False
return _npi.copyto(self, out=other)
elif isinstance(other, Context):
hret = ndarray(_new_alloc_handle(self.shape, other, True, self.dtype))
return _npi.copyto(self, out=hret)
else:
raise TypeError('copyto does not support type ' + str(type(other)))
def asscalar(self):
raise AttributeError('mxnet.numpy.ndarray object has no attribute asscalar')
def argmax(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the maximum values along the given axis.
Refer to `mxnet.numpy.argmax` for full documentation."""
return argmax(self, axis, out)
def as_in_context(self, context):
"""This function has been deprecated. Please refer to ``ndarray.as_in_ctx``."""
warnings.warn('ndarray.as_in_context has been renamed to'
' ndarray.as_in_ctx', DeprecationWarning)
return self.as_nd_ndarray().as_in_context(context).as_np_ndarray()
def as_in_ctx(self, ctx):
"""Returns an array on the target device with the same value as this array.
If the target context is the same as ``self.context``, then ``self`` is
returned. Otherwise, a copy is made.
Parameters
----------
        ctx : Context
The target context.
Returns
-------
ndarray
The target array.
"""
if self.ctx == ctx:
return self
return self.copyto(ctx)
@property
def ctx(self):
"""Device context of the array.
Examples
--------
>>> x = np.array([1, 2, 3, 4])
>>> x.ctx
cpu(0)
>>> type(x.ctx)
<class 'mxnet.context.Context'>
        >>> y = np.zeros((2, 3), ctx=npx.gpu(0))
>>> y.ctx
gpu(0)
"""
dev_typeid = ctypes.c_int()
dev_id = ctypes.c_int()
check_call(_LIB.MXNDArrayGetContext(
self.handle, ctypes.byref(dev_typeid), ctypes.byref(dev_id)))
return Context(Context.devtype2str[dev_typeid.value], dev_id.value)
@property
def context(self):
"""This function has been deprecated. Please refer to ``ndarray.ctx``."""
warnings.warn('ndarray.context has been renamed to ndarray.ctx', DeprecationWarning)
return self.as_nd_ndarray().context
def copy(self, order='C'): # pylint: disable=arguments-differ
"""Return a coyp of the array, keeping the same context.
Parameters
----------
order : str
The memory layout of the copy. Currently, only c-contiguous memory
layout is supported.
Examples
--------
>>> x = np.ones((2, 3))
>>> y = x.copy()
>>> y
array([[ 1., 1., 1.],
[ 1., 1., 1.]])
"""
if order != 'C':
raise NotImplementedError('ndarray.copy only supports order=\'C\', while '
'received {}'.format(str(order)))
return self.copyto(self.ctx)
def dot(self, b, out=None):
"""Dot product of two arrays.
Refer to ``numpy.dot`` for full documentation."""
return dot(self, b, out=out)
def reshape(self, *args, **kwargs): # pylint: disable=arguments-differ
"""Returns a copy of the array with a new shape.
Notes
-----
Unlike the free function `numpy.reshape`, this method on `ndarray` allows
the elements of the shape parameter to be passed in as separate arguments.
For example, ``a.reshape(10, 11)`` is equivalent to
``a.reshape((10, 11))``.
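        The two call styles are interchangeable:
        >>> a = np.arange(6)
        >>> a.reshape(2, 3).shape
        (2, 3)
        >>> a.reshape((2, 3)).shape
        (2, 3)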
"""
order = 'C'
if len(kwargs) > 1:
raise TypeError('function takes at most 1 keyword argument')
if len(kwargs) == 1:
if 'order' not in kwargs:
raise TypeError("'{}' is an invalid keyword argument for this function"
.format(list(kwargs.keys())[0]))
order = kwargs.pop('order', 'C')
if order != 'C':
raise NotImplementedError('only supports C-order,'
' while received {}'.format(order))
if len(args) == 0:
raise TypeError('reshape() takes exactly 1 argument (0 given)')
if len(args) == 1 and isinstance(args[0], tuple):
return _mx_np_op.reshape(self, newshape=args[0], order=order)
else:
return _mx_np_op.reshape(self, newshape=args, order=order)
def reshape_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reshape_like`.
The arguments are the same as for :py:func:`reshape_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reshape_like')
def reshape_view(self, *shape, **kwargs): # pylint: disable=redefined-outer-name
"""Returns a **view** of this array with a new shape without altering any data.
        Inherited from NDArray.reshape.
"""
return super(ndarray, self).reshape(*shape, **kwargs)
def zeros_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`zeros_like`.
The arguments are the same as for :py:func:`zeros_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute zeros_like')
def ones_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ones_like`.
The arguments are the same as for :py:func:`ones_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute ones_like')
def broadcast_axes(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`broadcast_axes`.
The arguments are the same as for :py:func:`broadcast_axes`, with
this array as data.
"""
        raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_axes')
def repeat(self, repeats, axis=None): # pylint: disable=arguments-differ
"""Repeat elements of an array."""
return repeat(self, repeats=repeats, axis=axis)
def pad(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pad`.
The arguments are the same as for :py:func:`pad`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute pad')
def swapaxes(self, axis1, axis2): # pylint: disable=arguments-differ
"""Return a copy of the array with axis1 and axis2 interchanged.
Refer to `mxnet.numpy.swapaxes` for full documentation.
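        For example:
        >>> x = np.zeros((2, 3))
        >>> x.swapaxes(0, 1).shape
        (3, 2)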
"""
return swapaxes(self, axis1, axis2)
def split(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split`.
The arguments are the same as for :py:func:`split`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute split')
def split_v2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`split_v2`.
The arguments are the same as for :py:func:`split_v2`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute split_v2')
def slice(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice`.
The arguments are the same as for :py:func:`slice`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice')
def slice_axis(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_axis`.
The arguments are the same as for :py:func:`slice_axis`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_axis')
def slice_like(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`slice_like`.
The arguments are the same as for :py:func:`slice_like`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute slice_like')
def slice_assign_scalar(self, value, begin, end, step):
"""
        Assign a scalar to a cropped subset of this ndarray. The value is broadcast to the
        cropped shape and cast to the same dtype as this ndarray.
        Parameters
        ----------
        value: numeric value
            The scalar value to assign; it is cast to this ndarray's data type.
        begin: tuple of begin indices
        end: tuple of end indices
        step: tuple of step lengths
Returns
-------
This ndarray.
Examples
--------
>>> x = np.ones((2, 2, 2))
>>> y = x.slice_assign_scalar(0, (0, 0, None), (1, 1, None), (None, None, None))
>>> y
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> x
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
return _npi.slice_assign_scalar(self, value, begin=begin, end=end, step=step, out=self)
def slice_assign(self, rhs, begin, end, step):
"""
Assign the rhs to a cropped subset of this ndarray in place.
Returns the view of this ndarray.
Parameters
----------
rhs: ndarray.
rhs and this NDArray should be of the same data type, and on the same device.
The shape of rhs should be the same as the cropped shape of this ndarray.
begin: tuple of begin indices
end: tuple of end indices
        step: tuple of step lengths
Returns
-------
out : ndarray
This ndarray.
Examples
--------
>>> x = np.ones((2, 2, 2))
>>> assigned = np.zeros((1, 1, 2))
>>> y = x.slice_assign(assigned, (0, 0, None), (1, 1, None), (None, None, None))
>>> y
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> x
array([[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
"""
return _npi.slice_assign(self, rhs, begin=begin, end=end, step=step, out=self)
def take(self, indices, axis=None, mode='raise'): # pylint: disable=arguments-differ, redefined-outer-name
"""Convenience fluent method for :py:func:`take`.
The arguments are the same as for :py:func:`take`, with
this array as data.
"""
return take(self, indices, axis, mode=mode)
def one_hot(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`one_hot`.
The arguments are the same as for :py:func:`one_hot`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute one_hot')
def pick(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`pick`.
The arguments are the same as for :py:func:`pick`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute pick')
def sort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`sort`.
The arguments are the same as for :py:func:`sort`, with
this array as data.
"""
        return sort(self, axis=axis, kind=kind, order=order)
def topk(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`topk`.
The arguments are the same as for :py:func:`topk`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute topk')
def argsort(self, axis=-1, kind=None, order=None): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`argsort`.
The arguments are the same as for :py:func:`argsort`, with
this array as data.
"""
return argsort(self, axis=axis, kind=kind, order=order)
def argmax_channel(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`argmax_channel`.
The arguments are the same as for :py:func:`argmax_channel`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute argmax_channel')
def argmin(self, axis=None, out=None): # pylint: disable=arguments-differ
"""Return indices of the minium values along the given axis.
Refer to `mxnet.numpy.argmin` for full documentation."""
return argmin(self, axis, out)
def clip(self, min=None, max=None, out=None): # pylint: disable=arguments-differ
"""Return an array whose values are limited to [min, max].
One of max or min must be given.
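        For example:
        >>> np.array([1., 2., 3., 4.]).clip(min=2, max=3)
        array([2., 2., 3., 3.])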
"""
return clip(self, min, max, out=out)
def abs(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`abs`.
The arguments are the same as for :py:func:`abs`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute abs')
def sign(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sign`.
The arguments are the same as for :py:func:`sign`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sign')
def flatten(self, order='C'): # pylint: disable=arguments-differ
"""Return a copy of the array collapsed into one dimension."""
return self.reshape(-1, order=order)
def shape_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`shape_array`.
The arguments are the same as for :py:func:`shape_array`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute shape_array')
def size_array(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`size_array`.
The arguments are the same as for :py:func:`size_array`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute size_array')
def expand_dims(self, *args, **kwargs): # pylint: disable=arguments-differ,unused-argument
"""Convenience fluent method for :py:func:`expand_dims`.
The arguments are the same as for :py:func:`expand_dims`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute expand_dims')
def tile(self, reps): # pylint: disable=arguments-differ
"""Construct an array by repeating A the number of times given by reps.
Refer to `mxnet.numpy.tile` for full documentation."""
return tile(self, reps=reps)
def transpose(self, *axes): # pylint: disable=arguments-differ
"""Permute the dimensions of an array."""
if len(axes) == 0:
axes = None
elif len(axes) == 1:
if isinstance(axes[0], (tuple, list)):
axes = axes[0]
elif axes[0] is None:
axes = None
return transpose(self, axes=axes)
def flip(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`flip`.
The arguments are the same as for :py:func:`flip`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute flip')
def depth_to_space(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`depth_to_space`.
The arguments are the same as for :py:func:`depth_to_space`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute depth_to_space')
def space_to_depth(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`space_to_depth`.
The arguments are the same as for :py:func:`space_to_depth`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute space_to_depth')
def diag(self, k=0, **kwargs):
"""Convenience fluent method for :py:func:`diag`.
The arguments are the same as for :py:func:`diag`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute diag')
def diagonal(self, offset=0, axis1=0, axis2=1): # pylint: disable=arguments-differ
"""Return the diagonal with the given offset.
If array has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-array whose diagonal is returned.
        Refer to `mxnet.numpy.diagonal` for full documentation.
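        For example (an illustrative sketch; the default float dtype is assumed):
        >>> x = np.arange(4).reshape(2, 2)
        >>> x.diagonal()
        array([0., 3.])
        >>> x.diagonal(offset=1)
        array([1.])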
"""
return diagonal(self, offset=offset, axis1=axis1, axis2=axis2)
def sum(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the sum of the array elements over the given axis."""
return sum(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def nansum(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nansum`.
The arguments are the same as for :py:func:`nansum`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute nansum')
def prod(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the product of the array elements over the given axis."""
return _mx_np_op.prod(self, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
def nanprod(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`nanprod`.
The arguments are the same as for :py:func:`nanprod`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute nanprod')
def mean(self, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Returns the average of the array elements along given axis."""
return mean(self, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
# pylint: disable=too-many-arguments, arguments-differ
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""Returns the standard deviation of the array elements along given axis."""
return std(self, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""Returns the variance of the array elements, along given axis."""
return var(self, axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
# pylint: enable=too-many-arguments, arguments-differ
def cumsum(self, axis=None, dtype=None, out=None):
"""Return the cumulative sum of the elements along the given axis."""
return _mx_nd_np.cumsum(self, axis=axis, dtype=dtype, out=out)
def tolist(self):
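"""Return the array as a (possibly nested) Python list, converting via asnumpy()."""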
return self.asnumpy().tolist()
def max(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Return the maximum along a given axis."""
return _mx_nd_np.max(self, axis=axis, out=out, keepdims=keepdims)
def min(self, axis=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`min`.
The arguments are the same as for :py:func:`min`, with
this array as data.
"""
return _mx_nd_np.min(self, axis=axis, out=out, keepdims=keepdims)
def norm(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`norm`.
The arguments are the same as for :py:func:`norm`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute norm')
def round(self, decimals=0, out=None, **kwargs): # pylint: disable=arguments-differ
"""Convenience fluent method for :py:func:`round`.
The arguments are the same as for :py:func:`round`, with
this array as data.
"""
return round(self, decimals=decimals, out=out, **kwargs)
def rint(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rint`.
The arguments are the same as for :py:func:`rint`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rint')
def fix(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`fix`.
The arguments are the same as for :py:func:`fix`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute fix')
def floor(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`floor`.
The arguments are the same as for :py:func:`floor`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute floor')
def ceil(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`ceil`.
The arguments are the same as for :py:func:`ceil`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute ceil')
def trunc(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`trunc`.
The arguments are the same as for :py:func:`trunc`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute trunc')
def sin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sin`.
The arguments are the same as for :py:func:`sin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sin')
def cos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cos`.
The arguments are the same as for :py:func:`cos`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cos')
def tan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tan`.
The arguments are the same as for :py:func:`tan`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tan')
def arcsin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsin`.
The arguments are the same as for :py:func:`arcsin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsin')
def arccos(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccos`.
The arguments are the same as for :py:func:`arccos`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arccos')
def arctan(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctan`.
The arguments are the same as for :py:func:`arctan`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arctan')
def degrees(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`degrees`.
The arguments are the same as for :py:func:`degrees`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute degrees')
def radians(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`radians`.
The arguments are the same as for :py:func:`radians`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute radians')
def sinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sinh`.
The arguments are the same as for :py:func:`sinh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sinh')
def cosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cosh`.
The arguments are the same as for :py:func:`cosh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cosh')
def tanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`tanh`.
The arguments are the same as for :py:func:`tanh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute tanh')
def arcsinh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arcsinh`.
The arguments are the same as for :py:func:`arcsinh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arcsinh')
def arccosh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arccosh`.
The arguments are the same as for :py:func:`arccosh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arccosh')
def arctanh(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`arctanh`.
The arguments are the same as for :py:func:`arctanh`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute arctanh')
def exp(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`exp`.
The arguments are the same as for :py:func:`exp`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute exp')
def expm1(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`expm1`.
The arguments are the same as for :py:func:`expm1`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute expm1')
def log(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log`.
The arguments are the same as for :py:func:`log`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log')
def log10(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log10`.
The arguments are the same as for :py:func:`log10`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log10')
def log2(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log2`.
The arguments are the same as for :py:func:`log2`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log2')
def log1p(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log1p`.
The arguments are the same as for :py:func:`log1p`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log1p')
def sqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sqrt`.
The arguments are the same as for :py:func:`sqrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sqrt')
def rsqrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rsqrt`.
The arguments are the same as for :py:func:`rsqrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rsqrt')
def cbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`cbrt`.
The arguments are the same as for :py:func:`cbrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute cbrt')
def rcbrt(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`rcbrt`.
The arguments are the same as for :py:func:`rcbrt`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute rcbrt')
def square(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`square`.
The arguments are the same as for :py:func:`square`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute square')
def reciprocal(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`reciprocal`.
The arguments are the same as for :py:func:`reciprocal`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute reciprocal')
def relu(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`relu`.
The arguments are the same as for :py:func:`relu`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute relu')
def sigmoid(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`sigmoid`.
The arguments are the same as for :py:func:`sigmoid`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute sigmoid')
def softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmax`.
The arguments are the same as for :py:func:`softmax`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute softmax')
def log_softmax(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`log_softmax`.
The arguments are the same as for :py:func:`log_softmax`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute log_softmax')
def softmin(self, *args, **kwargs):
"""Convenience fluent method for :py:func:`softmin`.
The arguments are the same as for :py:func:`softmin`, with
this array as data.
"""
raise AttributeError('mxnet.numpy.ndarray object has no attribute softmin')
def squeeze(self, axis=None): # pylint: disable=arguments-differ
"""Remove single-dimensional entries from the shape of a."""
return squeeze(self, axis=axis)
def broadcast_to(self, shape): # pylint: disable=redefined-outer-name
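"""Broadcast this array to the given shape; see :py:func:`mxnet.numpy.broadcast_to`."""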
return _mx_nd_np.broadcast_to(self, shape)
def broadcast_like(self, other):
raise AttributeError('mxnet.numpy.ndarray object has no attribute broadcast_like')
def _full(self, value):
"""
Currently for internal use only. Implemented for __setitem__.
Assign to self an array of self's same shape and type, filled with value.
"""
return _mx_nd_np.full(self.shape, value, ctx=self.ctx, dtype=self.dtype, out=self)
# pylint: disable=redefined-outer-name
def _scatter_set_nd(self, value_nd, indices):
"""
This is added as an ndarray class method in order to support polymorphism in NDArray and numpy.ndarray indexing
"""
return _npi.scatter_set_nd(
lhs=self, rhs=value_nd, indices=indices, shape=self.shape, out=self
)
# pylint: enable=redefined-outer-name
@property
def shape(self):
"""Tuple of array dimensions.
Examples
--------
>>> x = mx.np.array([1, 2, 3, 4])
>>> x.shape
(4,)
>>> y = mx.np.zeros((2, 3, 4))
>>> y.shape
(2, 3, 4)
>>> z = mx.np.array(3)
>>> z.shape
()
"""
num_dim = mx_int()
if _int64_enabled():
pdata = ctypes.POINTER(mx_int64)()
check_call(_LIB.MXNDArrayGetShape64(
self.handle, ctypes.byref(num_dim), ctypes.byref(pdata)))
else:
pdata = ctypes.POINTER(mx_int)()
check_call(_LIB.MXNDArrayGetShape(
self.handle, ctypes.byref(num_dim), ctypes.byref(pdata)))
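# A dimension count of -1 is assumed to signal an unknown (not yet inferred) shape,
# in which case None is returned instead of a tuple.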
if num_dim.value == -1:
return None
else:
return tuple(pdata[:num_dim.value]) # pylint: disable=invalid-slice-index
@property
def ndim(self):
"""Number of array dimensions."""
return len(self.shape)
@property
def size(self):
"""Number of elements in the array."""
return super(ndarray, self).size
@property
def dtype(self):
"""Data-type of the array's elements.
Returns
-------
numpy.dtype
This NDArray's data type.
Examples
--------
>>> x = np.zeros((2,3))
>>> x.dtype
dtype('float32')
>>> y = np.zeros((2,3), dtype='int32')
>>> y.dtype
dtype('int32')
"""
return _np.dtype(super(ndarray, self).dtype)
def tostype(self, stype):
raise AttributeError('mxnet.numpy.ndarray object has no attribute tostype')
@set_module('mxnet.numpy')
def empty(shape, dtype=float, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, without initializing entries.
Parameters
----------
shape : int or tuple of int
Shape of the empty array, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
Desired output data-type for the array, e.g, `numpy.int8`.
Note that this behavior differs from NumPy's `empty` function, where `float64`
is the default: here the default dtype can be 'float32' or 'float64', since
`float32` is the conventional default data type in deep learning.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data of the given shape, dtype, and order.
Examples
--------
>>> np.empty([2, 2])
array([[ 0.000000e+00, -2.524355e-29],
[ nan, -8.592023e+09]]) # uninitialized
>>> np.empty([2, 2], dtype=int)
array([[8751743591039004782, 3196766424264760104],
[7583328881310196768, 562950123910254]], dtype=int64) # uninitialized
"""
if order != 'C':
raise NotImplementedError('`empty` only supports order equal to `C`, while received {}'
.format(str(order)))
if ctx is None:
ctx = current_context()
if dtype is None or dtype is float:
dtype = _np.float64 if is_np_default_dtype() else _np.float32
if isinstance(shape, int):
shape = (shape,)
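# Allocate the backing handle without initializing its contents; the False flag is
# assumed to be delay_alloc, requesting immediate allocation.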
return ndarray(handle=_new_alloc_handle(shape, ctx, False, dtype))
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def array(object, dtype=None, ctx=None):
"""
Create an array.
Parameters
----------
object : array_like or `numpy.ndarray` or `mxnet.numpy.ndarray`
An array, any object exposing the array interface, an object whose
__array__ method returns an array, or any (nested) sequence.
dtype : data-type, optional
The desired data-type for the array.
The default dtype is ``object.dtype`` if `object` is an `ndarray`, `float32` otherwise.
Default dtype can be set to be consistent with official numpy by `npx.set_np(dtype=True)`.
* When npx.is_np_default_dtype() returns False, default dtype is float32;
* When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
out : ndarray
An array object satisfying the specified requirements.
Examples
--------
>>> np.array([1, 2, 3])
array([1., 2., 3.])
>>> np.array([[1, 2], [3, 4]])
array([[1., 2.],
[3., 4.]])
>>> np.array([[1, 0], [0, 1]], dtype=bool)
array([[ True, False],
[False, True]])
>>> np.array([1, 2, 3]).dtype
dtype('float32')
>>> npx.set_np(dtype=True)
>>> np.array([1, 2, 3]).dtype
dtype('float64')
"""
if ctx is None:
ctx = current_context()
if isinstance(object, _np.ndarray):
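# For NumPy inputs, a missing dtype defaults to the source dtype when
# npx.set_np(dtype=True) is active; otherwise float32 is used and float64 inputs
# are downcast to float32.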
if is_np_default_dtype():
dtype = object.dtype if dtype is None else dtype
else:
dtype = _np.float32 if dtype is None or object.dtype is _np.float64 else dtype
if isinstance(object, ndarray):
dtype = object.dtype if dtype is None else dtype
elif isinstance(object, NDArray):
raise ValueError("If you're trying to create a mxnet.numpy.ndarray "
"from mx.nd.NDArray, please use the zero-copy as_np_ndarray function.")
else:
if dtype is None:
default_dtype = _np.float64 if is_np_default_dtype() else _np.float32
dtype = object.dtype if hasattr(object, "dtype") else default_dtype
try:
object = _np.array(object, dtype=dtype)
except Exception as e:
# printing out the error raised by official NumPy's array function
# for transparency on users' side
raise TypeError('{}'.format(str(e)))
ret = empty(object.shape, dtype=dtype, ctx=ctx)
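# 0-d inputs are assigned through the empty-tuple index; everything else is copied
# with a full-slice assignment.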
if len(object.shape) == 0:
ret[()] = object
else:
ret[:] = object
return ret
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def shape(a):
"""
Return the shape of an array.
Parameters
----------
a : array_like
Input array.
Returns
-------
shape : tuple of ints
The elements of the shape tuple give the lengths of the
corresponding array dimensions.
See Also
--------
ndarray.shape : Equivalent array method.
Examples
--------
>>> np.shape(np.eye(3))
(3, 3)
>>> np.shape([[1, 2]])
(1, 2)
>>> np.shape([0])
(1,)
>>> np.shape(0)
()
"""
return _mx_nd_np.shape(a)
@set_module('mxnet.numpy')
def zeros(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with zeros.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the new array.
dtype : str or numpy.dtype, optional
An optional value type,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that this behavior differs from NumPy's `zeros` function, where `float64`
is the default: here the default dtype can be 'float32' or 'float64', since
`float32` is the conventional default data type in deep learning.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of zeros with the given shape, dtype, and ctx.
Examples
--------
>>> np.zeros(5)
array([0., 0., 0., 0., 0.])
>>> np.zeros((5,), dtype=int)
array([0, 0, 0, 0, 0], dtype=int64)
>>> np.zeros((2, 1))
array([[0.],
[0.]])
"""
return _mx_nd_np.zeros(shape, dtype, order, ctx)
@set_module('mxnet.numpy')
def ones(shape, dtype=None, order='C', ctx=None): # pylint: disable=redefined-outer-name
"""Return a new array of given shape and type, filled with ones.
This function currently only supports storing multi-dimensional data
in row-major (C-style).
Parameters
----------
shape : int or tuple of int
The shape of the new array.
dtype : str or numpy.dtype, optional
An optional value type. The default depends on your current default dtype.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that this behavior is different from NumPy's `ones` function where
`float64` is the default value.
order : {'C'}, optional, default: 'C'
How to store multi-dimensional data in memory, currently only row-major
(C-style) is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
Array of ones with the given shape, dtype, and ctx.
Examples
--------
>>> np.ones(5)
array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1], dtype=int64)
>>> np.ones((2, 1))
array([[1.],
[1.]])
>>> s = (2,2)
>>> np.ones(s)
array([[1., 1.],
[1., 1.]])
"""
return _mx_nd_np.ones(shape, dtype, order, ctx)
@set_module('mxnet.numpy')
def broadcast_to(array, shape): # pylint: disable=redefined-outer-name
"""
Broadcast an array to a new shape.
Parameters
----------
array : ndarray or scalar
The array to broadcast.
shape : tuple
The shape of the desired array.
Returns
-------
broadcast : array
A readonly view on the original array with the given shape. It is
typically not contiguous. Furthermore, more than one element of a
broadcasted array may refer to a single memory location.
Raises
------
MXNetError
If the array is not compatible with the new shape according to NumPy's
broadcasting rules.
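Examples
--------
A minimal sketch, assuming the default float32 dtype:
>>> x = np.array([1, 2, 3])
>>> np.broadcast_to(x, (3, 3))
array([[1., 2., 3.],
       [1., 2., 3.],
       [1., 2., 3.]])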
"""
return _mx_nd_np.broadcast_to(array, shape)
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def full(shape, fill_value, dtype=None, order='C', ctx=None, out=None):
r"""Return a new array of given shape and type, filled with `fill_value`.
Parameters
----------
shape : int or sequence of ints
Shape of the new array, e.g., ``(2, 3)`` or ``2``.
fill_value : scalar or ndarray
Fill value.
dtype : data-type, optional
The desired data-type for the array. The default, `None`, means
`np.array(fill_value).dtype`.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
ctx : mxnet.context.Context
The device, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the given shape, dtype, and order.
If `fill_value` is an ndarray, out will have the same context as `fill_value`
regardless of the provided `ctx`.
.. note::
This function differs from the original numpy.full in the following way(s):
* Has an additional `ctx` argument to specify the device
* Has an additional `out` argument
* Currently does not support `order` selection
See Also
--------
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Examples
--------
>>> np.full((2, 2), 10)
array([[10., 10.],
[10., 10.]])
>>> np.full((2, 2), 2, dtype=np.int32, ctx=mx.cpu(0))
array([[2, 2],
[2, 2]], dtype=int32)
"""
return _mx_nd_np.full(shape, fill_value, order=order, ctx=ctx, dtype=dtype, out=out)
# pylint: enable=too-many-arguments, redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def empty_like(prototype, dtype=None, order='C', subok=False, shape=None): # pylint: disable=W0621
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
prototype : ndarray
The shape and data-type of `prototype` define these same attributes
of the returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
subok : {False}, optional
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to False.
(Only support False at this moment)
shape : int or sequence of ints, optional.
Overrides the shape of the result. If order='K' and the number of
dimensions is unchanged, will try to keep order, otherwise,
order='C' is implied.
(Not supported at this moment)
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `prototype`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
empty : Return a new uninitialized array.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.empty_like(a)
array([[-5764607523034234880, -2305834244544065442, 4563075075], # uninitialized
[ 4567052944, -5764607523034234880, 844424930131968]])
>>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
>>> np.empty_like(a)
array([[4.9e-324, 9.9e-324, 1.5e-323], # uninitialized
[2.0e-323, 2.5e-323, 3.0e-323]])
"""
return _mx_nd_np.empty_like(prototype, dtype=dtype, order=order, subok=subok, shape=shape)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def all(a, axis=None, out=None, keepdims=False):
"""
Test whether all array elements along a given axis evaluate to True.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical AND reduction is performed.
The default (axis = None) is to perform a logical AND over
all the dimensions of the input array.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved.
Returns
-------
all : ndarray, bool
A new boolean or array is returned unless out is specified,
in which case a reference to out is returned.
Examples
--------
>>> np.all([[True,False],[True,True]])
False
>>> np.all([[True,False],[True,True]], axis=0)
array([ True, False])
>>> np.all([-1, 4, 5])
True
>>> np.all([1.0, np.nan])
True
>>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
>>> id(z), id(o), z
(28293632, 28293632, array(True)) # may vary
"""
return _mx_nd_np.all(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def any(a, axis=None, out=None, keepdims=False):
"""
Test whether any array element along a given axis evaluates to True.
Returns a single boolean unless `axis` is not None.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
axis : None or int or tuple of ints, optional
Axis or axes along which a logical OR reduction is performed.
The default (axis = None) is to perform a logical OR over
all the dimensions of the input array.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in
the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output and its type is preserved.
Returns
-------
any : bool or ndarray
A new boolean or ndarray is returned unless out is specified,
in which case a reference to out is returned.
Examples
--------
>>> np.any([[True, False], [True, True]])
True
>>> np.any([[True, False], [False, False]], axis=0)
array([ True, False])
>>> np.any([-1, 0, 5])
True
>>> np.any(np.nan)
True
>>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
(array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
>>> id(z), id(o) # identity of z and o # doctest: +SKIP
(191614240, 191614240)
"""
return _mx_nd_np.any(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def identity(n, dtype=None, ctx=None):
"""
Return the identity array.
The identity array is a square array with ones on
the main diagonal.
Parameters
----------
n : int
Number of rows (and columns) in `n` x `n` output.
dtype : data-type, optional
Data-type of the output.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
`n` x `n` array with its main diagonal set to one,
and all other elements 0.
Examples
--------
>>> np.identity(3)
array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
"""
return _mx_nd_np.identity(n, dtype, ctx)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def take(a, indices, axis=None, mode='raise', out=None):
r"""
Take elements from an array along an axis.
When axis is not None, this function does the same thing as "fancy"
indexing (indexing arrays using arrays); however, it can be easier to use
if you need elements along a given axis. A call such as
``np.take(arr, indices, axis=3)`` is equivalent to
``arr[:,:,:,indices,...]``.
Explained without fancy indexing, this is equivalent to the following use
of `ndindex`, which sets each of ``ii``, ``jj``, and ``kk`` to a tuple of
indices::
Ni, Nk = a.shape[:axis], a.shape[axis+1:]
Nj = indices.shape
for ii in ndindex(Ni):
for jj in ndindex(Nj):
for kk in ndindex(Nk):
out[ii + jj + kk] = a[ii + (indices[jj],) + kk]
Parameters
----------
a : ndarray
The source array.
indices : ndarray
The indices of the values to extract. Also allow scalars for indices.
axis : int, optional
The axis over which to select values. By default, the flattened
input array is used.
out : ndarray, optional
If provided, the result will be placed in this array. It should
be of the appropriate shape and dtype.
mode : {'clip', 'wrap'}, optional
Specifies how out-of-bounds indices will behave.
* 'clip' -- clip to the range (default)
* 'wrap' -- wrap around
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
that this disables indexing with negative numbers.
Returns
-------
out : ndarray
The returned array has the same type as `a`.
.. note::
This function differs from the original `numpy.take
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.take.html>`_ in
the following way(s):
* Only ndarray or scalar ndarray is accepted as valid input.
Examples
--------
>>> a = np.array([4, 3, 5, 7, 6, 8])
>>> indices = np.array([0, 1, 4])
>>> np.take(a, indices)
array([4., 3., 6.])
In this example, since `a` is an ndarray, "fancy" indexing can be used.
>>> a[indices]
array([4., 3., 6.])
If `indices` is not one dimensional, the output also has these dimensions.
>>> np.take(a, np.array([[0, 1], [2, 3]]))
array([[4., 3.],
[5., 7.]])
"""
return _mx_nd_np.take(a, indices, axis, mode, out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def unique(ar, return_index=False, return_inverse=False, return_counts=False, axis=None):
"""
Find the unique elements of an array.
Returns the sorted unique elements of an array. There are three optional
outputs in addition to the unique elements:
* the indices of the input array that give the unique values
* the indices of the unique array that reconstruct the input array
* the number of times each unique value comes up in the input array
Parameters
----------
ar : ndarray
Input array. Unless `axis` is specified, this will be flattened if it
is not already 1-D.
return_index : bool, optional
If True, also return the indices of `ar` (along the specified axis,
if provided, or in the flattened array) that result in the unique array.
return_inverse : bool, optional
If True, also return the indices of the unique array (for the specified
axis, if provided) that can be used to reconstruct `ar`.
return_counts : bool, optional
If True, also return the number of times each unique item appears
in `ar`.
axis : int or None, optional
The axis to operate on. If None, `ar` will be flattened. If an integer,
the subarrays indexed by the given axis will be flattened and treated
as the elements of a 1-D array with the dimension of the given axis,
see the notes for more details. The default is None.
Returns
-------
unique : ndarray
The sorted unique values.
unique_indices : ndarray, optional
The indices of the first occurrences of the unique values in the
original array. Only provided if `return_index` is True.
unique_inverse : ndarray, optional
The indices to reconstruct the original array from the
unique array. Only provided if `return_inverse` is True.
unique_counts : ndarray, optional
The number of times each of the unique values comes up in the
original array. Only provided if `return_counts` is True.
.. note::
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
treated in the same way as any other 1-D array. The result is that the
flattened subarrays are sorted in lexicographic order starting with the
first element.
This function differs from the original `numpy.unique
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.unique.html>`_ in
the following aspects:
* Only support ndarray as input.
* Object arrays or structured arrays are not supported.
Examples
--------
>>> np.unique(np.array([1, 1, 2, 2, 3, 3]))
array([1., 2., 3.])
>>> a = np.array([[1, 1], [2, 3]])
>>> np.unique(a)
array([1., 2., 3.])
Return the unique rows of a 2D array
>>> a = np.array([[1, 0, 0], [1, 0, 0], [2, 3, 4]])
>>> np.unique(a, axis=0)
array([[1., 0., 0.],
[2., 3., 4.]])
Return the indices of the original array that give the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_index=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 5, 3, 2], dtype=int64)
>>> a[indices]
array([1., 2., 3., 4., 6.])
Reconstruct the input array from the unique values:
>>> a = np.array([1, 2, 6, 4, 2, 3, 2])
>>> u, indices = np.unique(a, return_inverse=True)
>>> u
array([1., 2., 3., 4., 6.])
>>> indices
array([0, 1, 4, 3, 1, 2, 1], dtype=int64)
>>> u[indices]
array([1., 2., 6., 4., 2., 3., 2.])
"""
return _mx_nd_np.unique(ar, return_index, return_inverse, return_counts, axis)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def add(x1, x2, out=None, **kwargs):
"""
Add arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be added. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
add : ndarray or scalar
The sum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.add(1.0, 4.0)
5.0
>>>
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.add(x1, x2)
array([[ 0., 2., 4.],
[ 3., 5., 7.],
[ 6., 8., 10.]])
"""
return _mx_nd_np.add(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def subtract(x1, x2, out=None, **kwargs):
r"""Subtract arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be subtracted from each other. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape
of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
subtract : ndarray or scalar
The difference of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.subtract(1.0, 4.0)
-3.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.subtract(x1, x2)
array([[0., 0., 0.],
[3., 3., 3.],
[6., 6., 6.]])
"""
return _mx_nd_np.subtract(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def multiply(x1, x2, out=None, **kwargs):
"""
Multiply arguments element-wise.
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays to be multiplied. If x1.shape != x2.shape, they must be broadcastable to
a common shape (which may be the shape of one or the other).
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The product of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), not supported yet.
Examples
--------
>>> np.multiply(2.0, 4.0)
8.0
>>> x1 = np.arange(9.0).reshape((3, 3))
>>> x2 = np.arange(3.0)
>>> np.multiply(x1, x2)
array([[ 0., 1., 4.],
[ 0., 4., 10.],
[ 0., 7., 16.]])
"""
return _mx_nd_np.multiply(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def divide(x1, x2, out=None, **kwargs):
"""Returns a true division of the inputs, element-wise.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types including boolean, the output is of float32 or
float64 type, which depends on your current default dtype:
* When ``npx.is_np_default_dtype()`` returns False, default dtype is float32.
* When ``npx.is_np_default_dtype()`` returns True, default dtype is float64.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x = np.arange(5)
>>> np.divide(x, 4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
"""
return _mx_nd_np.divide(x1, x2, out=out)
@set_module('mxnet.numpy')
def true_divide(x1, x2, out=None):
"""Returns a true division of the inputs, element-wise.
Instead of the Python traditional 'floor division', this returns a true
division. True division adjusts the output type to present the best
answer, regardless of input types.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
.. note::
This operator now supports automatic type promotion. The resulting type will be determined
according to the following rules:
* If both inputs are of floating number types, the output is the more precise type.
* If only one of the inputs is floating number type, the result is that type.
* If both inputs are of integer types (including boolean), the output is of float32 or
float64 type, which depends on your current default dtype.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Examples
--------
>>> x = np.arange(5)
>>> np.true_divide(x, 4)
array([0. , 0.25, 0.5 , 0.75, 1. ])
"""
return _mx_nd_np.true_divide(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def mod(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.mod(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.mod(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmod(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmod(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.fmod(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def matmul(a, b, out=None, **kwargs):
r"""Matrix product of two arrays.
Parameters
----------
a, b : ndarray
Input arrays, scalars not allowed.
out : ndarray, optional
A location into which the result is stored.
If provided, it must have a shape that matches the signature (n,k),(k,m)->(n,m).
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The matrix product of the inputs.
This is a scalar only when both x1, x2 are 1-d vectors.
Raises
------
MXNetError
If the last dimension of a is not the same size as the second-to-last dimension of b.
If a scalar value is passed in.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : alternative matrix product with different broadcasting rules.
einsum : Einstein summation convention.
.. note::
The behavior depends on the arguments in the following way.
* If both arguments are ``2-D`` they are multiplied like conventional matrices.
* If either argument is ``N-D``, ``N > 2``, it is treated as a stack of matrices
residing in the last two indexes and broadcast accordingly.
* If the first argument is ``1-D``, it is promoted to a matrix by prepending
a 1 to its dimensions. After matrix multiplication the prepended 1 is removed.
* If the second argument is ``1-D``, it is promoted to a matrix by appending a 1
to its dimensions. After matrix multiplication the appended 1 is removed.
matmul differs from dot in two important ways:
* Multiplication by scalars is not allowed, use multiply instead.
* Stacks of matrices are broadcast together as if the matrices were elements,
respecting the signature ``(n,k),(k,m)->(n,m)``:
>>> a = np.ones([9, 5, 7, 4])
>>> c = np.ones([9, 5, 4, 3])
>>> np.dot(a, c).shape
(9, 5, 7, 9, 5, 3)
>>> np.matmul(a, c).shape
(9, 5, 7, 3)
>>> # n is 7, k is 4, m is 3
Examples
--------
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([[4, 1],
... [2, 2]])
>>> np.matmul(a, b)
array([[4., 1.],
[2., 2.]])
For 2-D mixed with 1-D, the result is the usual.
>>> a = np.array([[1, 0],
... [0, 1]])
>>> b = np.array([1, 2])
>>> np.matmul(a, b)
array([1., 2.])
>>> np.matmul(b, a)
array([1., 2.])
Broadcasting is conventional for stacks of arrays
>>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
>>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
>>> np.matmul(a, b).shape
(2, 2, 2)
>>> np.matmul(a, b)[0, 1, 1]
array(98.)
>>> sum(a[0, 1, :] * b[0, :, 1])
array(98.)
Scalar multiplication raises an error.
>>> np.matmul([1, 2], 3)
Traceback (most recent call last):
...
mxnet.base.MXNetError: ... : Multiplication by scalars is not allowed.
"""
return _mx_nd_np.matmul(a, b, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def remainder(x1, x2, out=None, **kwargs):
"""
Return element-wise remainder of division.
Parameters
----------
x1 : ndarray or scalar
Dividend array.
x2 : ndarray or scalar
Divisor array.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.remainder(np.arange(7), 5)
array([0., 1., 2., 3., 4., 0., 1.])
"""
return _mx_nd_np.remainder(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def power(x1, x2, out=None, **kwargs):
"""
First array elements raised to powers from second array, element-wise.
Parameters
----------
x1 : ndarray or scalar
The bases.
x2 : ndarray or scalar
The exponent.
out : ndarray
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
out : ndarray or scalar
The bases in x1 raised to the exponents in x2.
This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> x1 = np.arange(6)
>>> np.power(x1, 3)
array([ 0., 1., 8., 27., 64., 125.])
Raise the bases to different exponents.
>>> x2 = np.array([1.0, 2.0, 3.0, 3.0, 2.0, 1.0])
>>> np.power(x1, x2)
array([ 0., 1., 8., 27., 16., 5.])
The effect of broadcasting.
>>> x2 = np.array([[1, 2, 3, 3, 2, 1], [1, 2, 3, 3, 2, 1]])
>>> x2
array([[1., 2., 3., 3., 2., 1.],
[1., 2., 3., 3., 2., 1.]])
>>> np.power(x1, x2)
array([[ 0., 1., 8., 27., 16., 5.],
[ 0., 1., 8., 27., 16., 5.]])
"""
return _mx_nd_np.power(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def lcm(x1, x2, out=None, **kwargs):
"""
Returns the lowest common multiple of ``|x1|`` and ``|x2|``
Parameters
----------
x1, x2 : ndarrays or scalar values
The arrays for computing lowest common multiple. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which may be the shape of
one or the other).
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array
is returned.
Returns
-------
y : ndarray or scalar
The lowest common multiple of the absolute value of the inputs
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
gcd : The greatest common divisor
Examples
--------
>>> np.lcm(12, 20)
60
>>> np.lcm(np.arange(6, dtype=int), 20)
array([ 0, 20, 20, 60, 20, 20], dtype=int64)
"""
return _mx_nd_np.lcm(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sin(x, out=None, **kwargs):
r"""
Trigonometric sine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The sine of each element of x. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sin(np.pi/2.)
1.0
>>> np.sin(np.array((0., 30., 45., 60., 90.)) * np.pi / 180.)
array([0. , 0.5 , 0.70710677, 0.86602545, 1. ])
"""
return _mx_nd_np.sin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cos(x, out=None, **kwargs):
r"""
Cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Angle, in radians (:math:`2 \pi` rad equals 360 degrees).
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding cosine values. This is a scalar if x is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cos(np.array([0, np.pi/2, np.pi]))
array([ 1.000000e+00, -4.371139e-08, -1.000000e+00])
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.cos(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.cos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sinh(x, out=None, **kwargs):
"""
Hyperbolic sine, element-wise.
Equivalent to ``1/2 * (np.exp(x) - np.exp(-x))`` or ``-1j * np.sin(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic sine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sinh(0)
0.0
>>> # Example of providing the optional output parameter
>>> out1 = np.array([0], dtype='f')
>>> out2 = np.sinh(np.array([0.1]), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.sinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cosh(x, out=None, **kwargs):
"""
Hyperbolic cosine, element-wise.
Equivalent to ``1/2 * (np.exp(x) + np.exp(-x))`` and ``np.cos(1j*x)``.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The corresponding hyperbolic cosine values. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.cosh(0)
1.0
"""
return _mx_nd_np.cosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def tanh(x, out=None, **kwargs):
"""
Compute hyperbolic tangent element-wise.
Equivalent to ``np.sinh(x)/np.cosh(x)``.
Parameters
----------
x : ndarray or scalar.
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
----------
y : ndarray or scalar
The corresponding hyperbolic tangent values.
.. note::
If `out` is provided, the function writes the result into it,
and returns a reference to `out`. (See Examples)
* input x does not support complex computation (like imaginary number)
>>> np.tanh(np.pi*1j)
TypeError: type <type 'complex'> not supported
Examples
--------
>>> np.tanh(np.array([0, np.pi]))
array([0. , 0.9962721])
>>> np.tanh(np.pi)
0.99627207622075
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
>>> out1 = np.array(1)
>>> out2 = np.tanh(np.array(0.1), out1)
>>> out2 is out1
True
"""
return _mx_nd_np.tanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log10(x, out=None, **kwargs):
"""
Return the base 10 logarithm of the input array, element-wise.
Parameters
----------
x : ndarray or scalar
Input array or scalar.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs broadcast to. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output is the same as that of the input if the input is an ndarray.
Returns
-------
y : ndarray or scalar
The logarithm to the base 10 of `x`, element-wise. NaNs are
returned where x is negative. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.log10(np.array([1e-15, -3.]))
array([-15., nan])
"""
return _mx_nd_np.log10(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sqrt(x, out=None, **kwargs):
"""
Return the non-negative square-root of an array, element-wise.
Parameters
----------
x : ndarray or scalar
The values whose square-roots are required.
out : ndarray, or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
An array of the same shape as `x`, containing the positive
square-root of each element in `x`. This is a scalar if `x` is a scalar.
Notes
----
This function only supports input type of float.
Examples
--------
>>> np.sqrt(np.array([1,4,9]))
array([1., 2., 3.])
>>> np.sqrt(np.array([4, -1, _np.inf]))
array([ 2., nan, inf])
"""
return _mx_nd_np.sqrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def cbrt(x, out=None, **kwargs):
"""
Return the cube-root of an array, element-wise.
Parameters
----------
x : ndarray
The values whose cube-roots are required.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
y : ndarray
An array of the same shape as x, containing the cube-root of each element in x.
If out was provided, y is a reference to it. This is a scalar if x is a scalar.
Examples
----------
>>> np.cbrt(np.array([1, 8, 27]))
array([ 1., 2., 3.])
"""
return _mx_nd_np.cbrt(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def abs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> x = np.array([-1.2, 1.2])
>>> np.abs(x)
array([1.2, 1.2])
"""
return _mx_nd_np.abs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def fabs(x, out=None, **kwargs):
r"""
Calculate the absolute value element-wise.
This function returns the absolute values (positive magnitude) of the
data in `x`. Complex values are not handled, use `absolute` to find the
absolute values of complex data.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
absolute : ndarray
An ndarray containing the absolute value of
each element in `x`. This is a scalar if `x` is a scalar.
Examples
--------
>>> np.fabs(-1)
1.0
>>> np.fabs(np.array([-1.2, 1.2]))
array([ 1.2, 1.2])
"""
return _mx_nd_np.fabs(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def absolute(x, out=None, **kwargs):
"""
Calculate the absolute value element-wise.
np.abs is a shorthand for this function.
Parameters
----------
x : ndarray
Input array.
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape
that the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
absolute : ndarray
An ndarray containing the absolute value of each element in x.
Examples
----------
>>> x = np.array([-1.2, 1.2])
>>> np.absolute(x)
array([ 1.2, 1.2])
"""
return _mx_nd_np.absolute(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def exp(x, out=None, **kwargs):
r"""
Calculate the exponential of all elements in the input array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential of `x`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.exp(1)
2.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.exp(x)
array([0.36787945, 2.7182817 , 0.13533528, 7.389056 ])
"""
return _mx_nd_np.exp(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def expm1(x, out=None, **kwargs):
r"""
Calculate `exp(x) - 1` for all elements in the array.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array, element-wise exponential minus one: `out = exp(x) - 1`.
This is a scalar if `x` is a scalar.
Examples
--------
>>> np.expm1(1)
1.718281828459045
>>> x = np.array([-1, 1, -2, 2])
>>> np.expm1(x)
array([-0.63212056, 1.71828183, -0.86466472, 6.3890561])
"""
return _mx_nd_np.expm1(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arcsin(x, out=None, **kwargs):
r"""
Inverse sine, element-wise.
Parameters
----------
x : ndarray or scalar
`y`-coordinate on the unit circle.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
angle : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
The inverse sine of each element in `x`, in radians and in the
closed interval ``[-pi/2, pi/2]``.
Examples
--------
>>> np.arcsin(1) # pi/2
1.5707963267948966
>>> np.arcsin(-1) # -pi/2
-1.5707963267948966
>>> np.arcsin(0)
0.0
.. note::
`arcsin` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that :math:`sin(z) = x`. The convention is to
return the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, *arcsin* always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
The inverse sine is also known as `asin` or sin^{-1}.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.arcsin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.arcsin.html>`_ in
the following aspects:
* Only support ndarray or scalar now.
* `where` argument is not supported.
* Complex input is not supported.
References
----------
Abramowitz, M. and Stegun, I. A., *Handbook of Mathematical Functions*,
10th printing, New York: Dover, 1964, pp. 79ff.
http://www.math.sfu.ca/~cbm/aands/
"""
return _mx_nd_np.arcsin(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arccos(x, out=None, **kwargs):
"""
Trigonometric inverse cosine, element-wise.
The inverse of cos so that, if y = cos(x), then x = arccos(y).
Parameters
----------
x : ndarray
x-coordinate on the unit circle. For real arguments, the domain is [-1, 1].
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that
the inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length equal to the number of outputs.
Returns
----------
angle : ndarray
The angle of the ray intersecting the unit circle at the given x-coordinate in radians [0, pi].
This is a scalar if x is a scalar.
Notes
----------
arccos is a multivalued function: for each x there are infinitely many numbers z such that
cos(z) = x. The convention is to return the angle z whose real part lies in [0, pi].
For real-valued input data types, arccos always returns real output.
For each value that cannot be expressed as a real number or infinity, it yields nan and sets
the invalid floating point error flag.
The inverse cos is also known as acos or cos^-1.
Examples
----------
>>> np.arccos([1, -1])
array([ 0. , 3.14159265])
"""
return _mx_nd_np.arccos(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arctan(x, out=None, **kwargs):
r"""
Trigonometric inverse tangent, element-wise.
The inverse of tan, so that if ``y = tan(x)`` then ``x = arctan(y)``.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Out has the same shape as `x`. Its values lie in
``[-pi/2, pi/2]`` (``arctan(+/-inf)`` returns ``+/-pi/2``).
This is a scalar if `x` is a scalar.
Notes
-----
`arctan` is a multi-valued function: for each `x` there are infinitely
many numbers `z` such that tan(`z`) = `x`. The convention is to return
the angle `z` whose real part lies in [-pi/2, pi/2].
For real-valued input data types, `arctan` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
For complex-valued input, we do not have support for them yet.
The inverse tangent is also known as `atan` or tan^{-1}.
Examples
--------
>>> x = np.array([0, 1])
>>> np.arctan(x)
array([0. , 0.7853982])
>>> np.pi/4
0.7853981633974483
"""
return _mx_nd_np.arctan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def sign(x, out=None, **kwargs):
"""
Returns an element-wise indication of the sign of a number.
The `sign` function returns ``-1 if x < 0, 0 if x==0, 1 if x > 0``. Only real numbers are supported.
Parameters
----------
x : ndarray or a scalar
Input values.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The sign of `x`.
This is a scalar if `x` is a scalar.
.. note::
* Only supports real number as input elements.
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([-5., 4.5])
>>> np.sign(a)
array([-1., 1.])
Scalars as input:
>>> np.sign(4.0)
1.0
>>> np.sign(0)
0
Use ``out`` parameter:
>>> b = np.zeros((2, ))
>>> np.sign(a, out=b)
array([-1., 1.])
>>> b
array([-1., 1.])
"""
return _mx_nd_np.sign(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log(x, out=None, **kwargs):
"""
Natural logarithm, element-wise.
The natural logarithm `log` is the inverse of the exponential function,
so that `log(exp(x)) = x`. The natural logarithm is logarithm in base
`e`.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The natural logarithm of `x`, element-wise.
This is a scalar if `x` is a scalar.
.. note::
Currently only supports data of real values and ``inf`` as input. Returns data of
real value, ``inf``, ``-inf`` and ``nan`` according to the input.
This function differs from the original `numpy.log
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log.html>`_ in
the following aspects:
* Does not support complex number for now
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.array([1, np.exp(1), np.exp(2), 0], dtype=np.float64)
>>> np.log(a)
array([ 0., 1., 2., -inf], dtype=float64)
>>> # Using the default float32 dtype leads to slightly different behavior
>>> a = np.array([1, np.exp(1), np.exp(2), 0])
>>> np.log(a)
array([ 0., 0.99999994, 2., -inf])
>>> np.log(1)
0.0
"""
return _mx_nd_np.log(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def rint(x, out=None, **kwargs):
"""
Round elements of the array to the nearest integer.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
.. note::
This function differs from the original `numpy.rint
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.rint.html>`_ in
the following way(s):
* only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
* broadcasting to `out` of different shape is currently not supported
* when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.rint(a)
array([-2., -2., -0., 0., 1., 2., 2.])
"""
return _mx_nd_np.rint(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log2(x, out=None, **kwargs):
"""
Base-2 logarithm of x.
Parameters
----------
x : ndarray or scalar
Input values.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The logarithm base two of `x`, element-wise.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original `numpy.log2
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.log2.html>`_ in
the following way(s):
* only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
* broadcasting to `out` of different shape is currently not supported
* when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> x = np.array([0, 1, 2, 2**4])
>>> np.log2(x)
array([-inf, 0., 1., 4.])
"""
return _mx_nd_np.log2(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def log1p(x, out=None, **kwargs):
"""
Return the natural logarithm of one plus the input array, element-wise.
Calculates ``log(1 + x)``.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs fill into. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
Natural logarithm of 1 + x, element-wise. This is a scalar
if x is a scalar.
Notes
-----
For real-valued input, `log1p` is accurate also for `x` so small
that `1 + x == 1` in floating-point accuracy.
Logarithm is a multivalued function: for each `x` there is an infinite
number of `z` such that `exp(z) = 1 + x`. The convention is to return
the `z` whose imaginary part lies in `[-pi, pi]`.
For real-valued input data types, `log1p` always returns real output.
For each value that cannot be expressed as a real number or infinity,
it yields ``nan`` and sets the `invalid` floating point error flag.
cannot support complex-valued input.
Examples
--------
>>> np.log1p(1e-99)
1e-99
>>> a = np.array([3, 4, 5])
>>> np.log1p(a)
array([1.3862944, 1.609438 , 1.7917595])
"""
return _mx_nd_np.log1p(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def degrees(x, out=None, **kwargs):
"""
Convert angles from radians to degrees.
Parameters
----------
x : ndarray
Input value. Elements must be of real value.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding degree values; if `out` was supplied this is a
reference to it.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original `numpy.degrees
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.degrees.html>`_ in
the following aspects:
* Input type does not support Python native iterables(list, tuple, ...).
Only ndarray is supported.
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> rad = np.arange(12.) * np.pi / 6
>>> np.degrees(rad)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> # Use specified ``out`` ndarray:
>>> out = np.zeros((rad.shape))
>>> np.degrees(rad, out)
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
>>> out
array([ 0., 30., 60., 90., 120., 150., 180., 210., 240., 270., 300., 330.])
"""
return _mx_nd_np.degrees(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def rad2deg(x, out=None, **kwargs):
r"""Convert angles from radians to degrees.
Parameters
----------
x : ndarray or scalar
Angles in radians.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in degrees.
This is a scalar if `x` is a scalar.
.. note::
"rad2deg(x)" is "x * 180 / pi".
This function differs from the original numpy.rad2deg in the following aspects:
* Only support float32 and float64.
* `out` must be the same size as the input.
Examples
--------
>>> np.rad2deg(np.pi/2)
90.0
"""
return _mx_nd_np.rad2deg(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def radians(x, out=None, **kwargs):
"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Input array in degrees.
out : ndarray or None
A location into which the result is stored.
If provided, it must have the same shape and type as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray
The corresponding radian values. This is a scalar if x is a scalar.
.. note::
This function differs from the original `numpy.radians
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.radians.html>`_ in
the following way(s):
* only ndarray or scalar is accepted as valid input, tuple of ndarray is not supported
* broadcasting to `out` of different shape is currently not supported
* when input is plain python numerics, the result will not be stored in the `out` param
Examples
--------
>>> deg = np.arange(12.) * 30.
>>> np.radians(deg)
array([0. , 0.5235988, 1.0471976, 1.5707964, 2.0943952, 2.6179938,
3.1415927, 3.6651914, 4.1887903, 4.712389 , 5.2359877, 5.7595863],
dtype=float32)
"""
return _mx_nd_np.radians(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def deg2rad(x, out=None, **kwargs):
r"""
Convert angles from degrees to radians.
Parameters
----------
x : ndarray or scalar
Angles in degrees.
out : ndarray or None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The corresponding angle in radians.
This is a scalar if `x` is a scalar.
.. note::
"deg2rad(x)" is "x * pi / 180".
This function differs from the original numpy.deg2rad in the following aspects:
* Only support float32 and float64.
* `out` must be the same size as the input.
Examples
--------
>>> np.deg2rad(180)
3.1415927
"""
return _mx_nd_np.deg2rad(x, out=out)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def reciprocal(x, out=None, **kwargs):
r"""Return the reciprocal of the argument, element-wise.
Calculates ``1/x``.
Parameters
----------
x : ndarray or scalar
The values whose reciprocals are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.reciprocal(2.)
0.5
>>> x = np.array([1, 2., 3.33])
>>> np.reciprocal(x)
array([1. , 0.5 , 0.3003003])
.. note::
This function is not designed to work with integers.
For integer arguments with absolute value larger than 1 the result is
always zero because of the way Python handles integer division. For
integer zero the result is an overflow.
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.reciprocal
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.reciprocal.html>`_ in
the following aspects:
* Only support ndarray and scalar now.
* `where` argument is not supported.
"""
return _mx_nd_np.reciprocal(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def square(x, out=None, **kwargs):
r"""
Return the element-wise square of the input.
Parameters
----------
x : ndarray or scalar
The values whose squares are required.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape as the input.
If not provided or None, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
Output array is same shape and type as x. This is a scalar if x is a scalar.
Examples
--------
>>> np.square(2.)
4.0
>>> x = np.array([1, 2., -1])
>>> np.square(x)
array([1., 4., 1.])
.. note::
The output `ndarray` has the same `ctx` as the input `ndarray`.
This function differs from the original `numpy.square
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.square.html>`_ in
the following aspects:
* Only support ndarray and scalar now.
* `where` argument is not supported.
* Complex input is not supported.
"""
return _mx_nd_np.square(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def negative(x, out=None, **kwargs):
r"""
Numerical negative, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored.
If provided, it must have a shape that the inputs broadcast to.
If not provided or None, a freshly-allocated array is returned.
A tuple (possible only as a keyword argument) must have length
equal to the number of outputs.
Returns
-------
y : ndarray or scalar
Returned array or scalar: y = -x. This is a scalar if x is a scalar.
Examples
--------
>>> np.negative(1)
-1
"""
return _mx_nd_np.negative(x, out=out)
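# A usage sketch for `negative` with an array argument (assumes `mxnet.numpy` is
# imported as `np`; exact float formatting may differ):
#
#   >>> np.negative(np.array([1., -1.]))
#   array([-1.,  1.])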
@set_module('mxnet.numpy')
@wrap_np_unary_func
def fix(x, out=None, **kwargs):
"""
Round an array of floats element-wise to nearest integer towards zero.
The rounded values are returned as floats.
Parameters
----------
x : ndarray
An array of floats to be rounded
out : ndarray, optional
Output array
Returns
-------
y : ndarray or scalar
An ndarray of floats containing the rounded values. This is a scalar if `x` is a scalar.
Examples
---------
>>> np.fix(3.14)
3.0
"""
return _mx_nd_np.fix(x, out=out)
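# A usage sketch of `fix` rounding toward zero, so negative inputs round up
# (assumes `mxnet.numpy` is imported as `np`; exact float formatting may differ):
#
#   >>> np.fix(np.array([2.1, 2.9, -2.1, -2.9]))
#   array([ 2.,  2., -2., -2.])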
@set_module('mxnet.numpy')
@wrap_np_unary_func
def tan(x, out=None, **kwargs):
r"""
Compute tangent element-wise.
Equivalent to np.sin(x)/np.cos(x) element-wise.
Parameters
----------
x : ndarray
Input array.
out : ndarray or none, optional
A location into which the result is stored. If provided,
it must have a shape that the inputs broadcast to. If not provided or None,
a freshly-allocated array is returned. A tuple (possible only as a keyword argument)
must have length equal to the number of outputs.
Returns
-------
y : ndarray
The corresponding tangent values. This is a scalar if x is a scalar.
Examples
---------
>>> np.tan(np.array([-np.pi, np.pi/2, np.pi]))
array([-8.7422777e-08, -2.2877332e+07, 8.7422777e-08])
"""
return _mx_nd_np.tan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def ceil(x, out=None, **kwargs):
r"""
Return the ceiling of the input, element-wise.
The ceil of the ndarray `x` is the smallest integer `i`, such that
`i >= x`. It is often denoted as :math:`\lceil x \rceil`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs fill into. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The ceiling of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.ceil(a)
array([-1., -1., -0., 1., 2., 2., 2.])
>>> # if you use parameter out, x and out must be ndarray.
>>> a = np.array(1)
>>> np.ceil(np.array(3.5), a)
array(4.)
>>> a
array(4.)
"""
return _mx_nd_np.ceil(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def floor(x, out=None, **kwargs):
r"""
Return the floor of the input, element-wise.
The floor of the ndarray `x` is the largest integer `i`, such that
`i <= x`. It is often denoted as :math:`\lfloor x \rfloor`.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None
A location into which the result is stored. If provided, it
must have a shape that the inputs fill into. If not provided
or None, a freshly-allocated array is returned. The dtype of the
output and input must be the same.
Returns
-------
y : ndarray or scalar
The floor of each element in `x`, with `float` dtype.
This is a scalar if `x` is a scalar.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.floor(a)
array([-2., -2., -1., 0., 1., 1., 2.])
>>> # if you use parameter out, x and out must be ndarray.
>>> a = np.array(1)
>>> np.floor(np.array(3.5), a)
array(3.)
>>> a
array(3.)
"""
return _mx_nd_np.floor(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def invert(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _mx_nd_np.bitwise_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def bitwise_not(x, out=None, **kwargs):
r"""
Compute bit-wise inversion, or bit-wise NOT, element-wise.
Computes the bit-wise NOT of the underlying binary representation of
the integers in the input arrays. This ufunc implements the C/Python
operator ``~``.
Parameters
----------
x : array_like
Only integer and boolean types are handled.
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
out : ndarray or scalar
Result.
This is a scalar if `x` is a scalar.
See Also
--------
bitwise_and, bitwise_or, bitwise_xor
logical_not
binary_repr :
Return the binary representation of the input number as a string.
Examples
--------
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
>>> x = np.invert(np.array(13, dtype=np.uint8))
>>> x
242
>>> np.binary_repr(x, width=8)
'11110010'
Notes
-----
`bitwise_not` is an alias for `invert`:
>>> np.bitwise_not is np.invert
True
"""
return _mx_nd_np.bitwise_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def trunc(x, out=None, **kwargs):
r"""
Return the truncated value of the input, element-wise.
The truncated value of the scalar `x` is the nearest integer `i` which
is closer to zero than `x` is. In short, the fractional part of the
signed number `x` is discarded.
Parameters
----------
x : ndarray or scalar
Input data.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : ndarray or scalar
The truncated value of each element in `x`.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original numpy.trunc in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
>>> np.trunc(a)
array([-1., -1., -0., 0., 1., 1., 2.])
"""
return _mx_nd_np.trunc(x, out=out, **kwargs)
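# A sketch contrasting `trunc` (round toward zero) with `floor` (round toward
# negative infinity) on negative inputs (assumes `mxnet.numpy` is imported as
# `np`; exact float formatting may differ):
#
#   >>> a = np.array([-1.5, 1.5])
#   >>> np.trunc(a)
#   array([-1.,  1.])
#   >>> np.floor(a)
#   array([-2.,  1.])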
@set_module('mxnet.numpy')
@wrap_np_unary_func
def logical_not(x, out=None, **kwargs):
r"""
Compute the truth value of NOT x element-wise.
Parameters
----------
x : ndarray or scalar
Logical NOT is applied to the elements of `x`.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
y : bool or ndarray of bool
Boolean result with the same shape as `x` of the NOT operation
on elements of `x`.
This is a scalar if `x` is a scalar.
.. note::
This function differs from the original numpy.logical_not in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> x= np.array([True, False, 0, 1])
>>> np.logical_not(x)
array([False, True, True, False])
>>> x = np.arange(5)
>>> np.logical_not(x<3)
array([False, False, False, True, True])
"""
return _mx_nd_np.logical_not(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arcsinh(x, out=None, **kwargs):
r"""
Inverse hyperbolic sine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arcsinh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
.. note::
`arcsinh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `sinh(z) = x`.
For real-valued input data types, `arcsinh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arcsinh in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Do not support complex-valued input.
* Cannot cast type automatically. DType of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arcsinh(a)
array([1.8798636, 2.3124383])
>>> np.arcsinh(0)
0.0
"""
return _mx_nd_np.arcsinh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arccosh(x, out=None, **kwargs):
r"""
Inverse hyperbolic cosine, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arccosh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
.. note::
`arccosh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `cosh(z) = x`.
For real-valued input data types, `arccosh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arccosh in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Do not support complex-valued input.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([3.2, 5.0])
>>> np.arccosh(a)
array([1.8309381, 2.2924316])
>>> np.arccosh(1)
0.0
"""
return _mx_nd_np.arccosh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def arctanh(x, out=None, **kwargs):
r"""
Inverse hyperbolic tangent, element-wise.
Parameters
----------
x : ndarray or scalar
Input array.
out : ndarray or None, optional
A location into which the result is stored.
Returns
-------
arctanh : ndarray
Array of the same shape as `x`.
This is a scalar if `x` is a scalar.
.. note::
`arctanh` is a multivalued function: for each `x` there are infinitely
many numbers `z` such that `tanh(z) = x`.
For real-valued input data types, `arctanh` always returns real output.
For each value that cannot be expressed as a real number or infinity, it
yields ``nan`` and sets the `invalid` floating point error flag.
This function differs from the original numpy.arctanh in the following aspects:
* Do not support `where`, a parameter in numpy which indicates where to calculate.
* Do not support complex-valued input.
* Cannot cast type automatically. Dtype of `out` must be same as the expected one.
* Cannot broadcast automatically. Shape of `out` must be same as the expected one.
* If `x` is plain python numeric, the result won't be stored in out.
Examples
--------
>>> a = np.array([0.0, -0.5])
>>> np.arctanh(a)
array([0., -0.54930615])
>>> np.arctanh(0)
0.0
"""
return _mx_nd_np.arctanh(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
Perform an indirect sort along the given axis using the algorithm specified
by the `kind` keyword. It returns an array of indices of the same shape as
`a` that index data along the given axis in sorted order.
Parameters
----------
a : ndarray
Array to sort.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
index_array : ndarray, int
Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
More generally, ``np.take_along_axis(a, index_array, axis=axis)``
always yields the sorted `a`, irrespective of dimensionality.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
One dimensional array:
>>> x = np.array([3, 1, 2])
>>> np.argsort(x)
array([1, 2, 0])
Two-dimensional array:
>>> x = np.array([[0, 3], [2, 2]])
>>> x
array([[0, 3],
[2, 2]])
>>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
>>> ind
array([[0, 1],
[1, 0]])
>>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
array([[0, 2],
[2, 3]])
>>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
>>> ind
array([[0, 1],
[0, 1]])
>>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
array([[0, 3],
[2, 2]])
Indices of the sorted elements of a N-dimensional array:
>>> ind = np.unravel_index(np.argsort(x, axis=None), x.shape)
>>> ind
(array([0, 1, 1, 0]), array([0, 0, 1, 1]))
>>> x[ind] # same as np.sort(x, axis=None)
array([0, 2, 2, 3])
"""
return _mx_nd_np.argsort(a, axis=axis, kind=kind, order=order)
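# Since `kind` has no effect and there is no descending option, indices for a
# descending sort can be obtained by sorting the negated values; a sketch
# (assumes `mxnet.numpy` is imported as `np`; output formatting follows the
# `argsort` examples above):
#
#   >>> x = np.array([3, 1, 2])
#   >>> np.argsort(-x)
#   array([0, 2, 1])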
@set_module('mxnet.numpy')
def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
Parameters
----------
a : ndarray
Array to be sorted.
axis : int or None, optional
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : string, optional
This argument can take any string, but it does not have any effect on the
final result.
order : str or list of str, optional
Not supported yet, will raise NotImplementedError if not None.
Returns
-------
sorted_array : ndarray
Array of the same type and shape as `a`.
Notes
-----
This operator does not support different sorting algorithms.
Examples
--------
>>> a = np.array([[1,4],[3,1]])
>>> np.sort(a) # sort along the last axis
array([[1, 4],
[1, 3]])
>>> np.sort(a, axis=None) # sort the flattened array
array([1, 1, 3, 4])
>>> np.sort(a, axis=0) # sort along the first axis
array([[1, 1],
[3, 4]])
"""
return _mx_nd_np.sort(a, axis=axis, kind=kind, order=order)
@set_module('mxnet.numpy')
def tensordot(a, b, axes=2):
r"""Compute tensor dot product along specified axes for arrays >= 1-D.
Given two tensors (arrays of dimension greater than or equal to one),
``a`` and ``b``, and an ndarray object containing two ndarray
objects, ``(a_axes, b_axes)``, sum the products of ``a``'s and ``b``'s
elements (components) over the axes specified by ``a_axes`` and
``b_axes``. The third argument can be a single non-negative
integer_like scalar, ``N``; if it is such, then the last ``N``
dimensions of ``a`` and the first ``N`` dimensions of ``b`` are summed
over.
Parameters
----------
a, b : ndarray, len(shape) >= 1
Tensors to "dot".
axes : int or (2,) ndarray
* integer_like
If an int N, sum over the last N axes of `a` and the first N axes
of `b` in order. The sizes of the corresponding axes must match.
* (2,) ndarray
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both sequences must be of the same length.
See Also
--------
dot, einsum
.. note::
Three common use cases are:
* ``axes = 0`` : tensor product :math:`a\otimes b`
* ``axes = 1`` : tensor dot product :math:`a\cdot b`
* ``axes = 2`` : (default) tensor double contraction :math:`a:b`
When `axes` is integer_like, the sequence for evaluation will be: first
the -Nth axis in `a` and 0th axis in `b`, and the -1th axis in `a` and
Nth axis in `b` last.
When there is more than one axis to sum over - and they are not the last
(first) axes of `a` (`b`) - the argument `axes` should consist of
two sequences of the same length, with the first axis to sum over given
first in both sequences, the second axis second, and so forth.
Examples
--------
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> c = np.tensordot(a,b, axes=([1,0],[0,1]))
>>> c.shape
(5, 2)
>>> c
array([[ 4400., 4730.],
[ 4532., 4874.],
[ 4664., 5018.],
[ 4796., 5162.],
[ 4928., 5306.]])
"""
return _mx_nd_np.tensordot(a, b, axes)
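# With `axes=1`, tensordot contracts the last axis of `a` with the first axis of
# `b`, which for 2-D inputs is ordinary matrix multiplication; a sketch (assumes
# `mxnet.numpy` is imported as `np`; exact float formatting may differ):
#
#   >>> a = np.arange(6.).reshape(2, 3)
#   >>> b = np.arange(6.).reshape(3, 2)
#   >>> np.tensordot(a, b, axes=1)
#   array([[10., 13.],
#          [28., 40.]])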
@set_module('mxnet.numpy')
def histogram(a, bins=10, range=None, normed=None, weights=None, density=None): # pylint: disable=too-many-arguments
"""
Compute the histogram of a set of data.
Parameters
----------
a : ndarray
Input data. The histogram is computed over the flattened array.
bins : int or ndarray
If `bins` is an int, it defines the number of equal-width
bins in the given range (10, by default). If `bins` is a
sequence, it defines a monotonically increasing array of bin edges,
including the rightmost edge, allowing for non-uniform bin widths.
.. versionadded:: 1.11.0
If `bins` is a string, it defines the method used to calculate the
optimal bin width, as defined by `histogram_bin_edges`.
range : (float, float)
The lower and upper range of the bins. Required when `bins` is an integer.
Values outside the range are ignored. The first element of the range must
be less than or equal to the second.
normed : bool, optional
Not supported yet, coming soon.
weights : array_like, optional
Not supported yet, coming soon.
density : bool, optional
Not supported yet, coming soon.
Examples
--------
>>> np.histogram(np.arange(4), bins=np.arange(5))
[array([1, 1, 1, 1], dtype=int64), array([0., 1., 2., 3., 4.])]
"""
return _mx_nd_np.histogram(a, bins=bins, range=range, normed=normed, weights=weights, density=density)
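# When `bins` is an integer, `range` must be passed explicitly; a sketch (assumes
# `mxnet.numpy` is imported as `np`). With bin edges [0., 1., 2., 3., 4.] the
# expected counts are [1, 2, 1, 0]:
#
#   >>> np.histogram(np.array([0.5, 1.5, 1.5, 2.5]), bins=4, range=(0, 4))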
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def eye(N, M=None, k=0, dtype=float, **kwargs):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the output.
M : int, optional
Number of columns in the output. If None, defaults to N.
k : int, optional
Index of the diagonal: 0 (the default) refers to the main diagonal,
a positive value refers to an upper diagonal,
and a negative value to a lower diagonal.
dtype : data-type, optional
Data-type of the returned array.
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Returns
-------
I : ndarray of shape (N,M)
An array where all elements are equal to zero,
except for the k-th diagonal, whose values are equal to one.
Examples
--------
>>> np.eye(2, dtype=int)
array([[1, 0],
[0, 1]], dtype=int64)
>>> np.eye(3, k=1)
array([[0., 1., 0.],
[0., 0., 1.],
[0., 0., 0.]])
"""
return _mx_nd_np.eye(N, M, k, dtype, **kwargs)
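# A sketch of a non-square identity-like matrix via the `M` argument (assumes
# `mxnet.numpy` is imported as `np`; exact float formatting may differ):
#
#   >>> np.eye(2, 3)
#   array([[1., 0., 0.],
#          [0., 1., 0.]])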
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0, ctx=None): # pylint: disable=too-many-arguments
r"""
Return evenly spaced numbers over a specified interval.
Returns num evenly spaced samples, calculated over the interval [start, stop].
The endpoint of the interval can optionally be excluded.
Parameters
----------
start : real number
The starting value of the sequence.
stop : real number
The end value of the sequence, unless endpoint is set to False. In
that case, the sequence consists of all but the last of num + 1
evenly spaced samples, so that stop is excluded. Note that the step
size changes when endpoint is False.
num : int, optional
Number of samples to generate. Default is 50. Must be non-negative.
endpoint : bool, optional
If True, stop is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (samples, step), where step is the spacing between samples.
dtype : dtype, optional
The type of the output array. If dtype is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start or
stop are array-like. By default (0), the samples will be along a new
axis inserted at the beginning. Use -1 to get an axis at the end.
Returns
-------
samples : ndarray
There are num equally spaced samples in the closed interval
`[start, stop]` or the half-open interval `[start, stop)`
(depending on whether endpoint is True or False).
step : float, optional
Only returned if retstep is True
Size of spacing between samples.
See Also
--------
arange : Similar to `linspace`, but uses a step size (instead of the
number of samples).
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
(array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
>>> import matplotlib.pyplot as plt
>>> N = 8
>>> y = np.zeros(N)
>>> x1 = np.linspace(0, 10, N, endpoint=True)
>>> x2 = np.linspace(0, 10, N, endpoint=False)
>>> plt.plot(x1.asnumpy(), y.asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(x2.asnumpy(), (y + 0.5).asnumpy(), 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.ylim([-0.5, 1])
(-0.5, 1)
>>> plt.show()
.. note::
This function differs from the original `numpy.linspace
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html>`_ in
the following aspects:
* `start` and `stop` do not support list, numpy ndarray and mxnet ndarray
* `axis` can only be 0
* An additional `ctx` argument can be used to specify the device, e.g. the i-th
GPU.
"""
return _mx_nd_np.linspace(start, stop, num, endpoint, retstep, dtype, axis, ctx)
# pylint: enable=redefined-outer-name
# pylint: disable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0, ctx=None):
r"""Return numbers spaced evenly on a log scale.
In linear space, the sequence starts at ``base ** start``
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
Non-scalar `start` and `stop` are now supported.
Parameters
----------
start : int or float
``base ** start`` is the starting value of the sequence.
stop : int or float
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
length `num`) are returned.
num : integer, optional
Number of samples to generate. Default is 50.
endpoint : boolean, optional
If true, `stop` is the last sample. Otherwise, it is not included.
Default is True.
base : float, optional
The base of the log space. The step size between the elements in
``ln(samples) / ln(base)`` (or ``log_base(samples)``) is uniform.
Default is 10.0.
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
axis : int, optional
The axis in the result to store the samples. Relevant only if start
or stop are array-like. By default (0), the samples will be along a
new axis inserted at the beginning. Currently, only ``axis=0`` is supported.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
samples : ndarray
`num` samples, equally spaced on a log scale.
See Also
--------
arange : Similar to linspace, with the step size specified instead of the
number of samples. Note that, when used with a float endpoint, the
endpoint may or may not be included.
linspace : Similar to logspace, but with the samples uniformly distributed
in linear space, instead of log space.
Notes
-----
Logspace is equivalent to the code
>>> y = np.linspace(start, stop, num=num, endpoint=endpoint)
...
>>> power(base, y).astype(dtype)
...
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
array([ 100. , 215.44347, 464.15887, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
array([100. , 177.82794, 316.22775, 562.3413 ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
array([4. , 5.0396843, 6.349604 , 8. ])
>>> np.logspace(2.0, 3.0, num=4, base=2.0, dtype=np.int32)
array([4, 5, 6, 8], dtype=int32)
>>> np.logspace(2.0, 3.0, num=4, ctx=npx.gpu(0))
array([ 100. , 215.44347, 464.15887, 1000. ], ctx=gpu(0))
"""
return _mx_nd_np.logspace(start, stop, num, endpoint, base, dtype, axis, ctx=ctx)
# pylint: enable=too-many-arguments, redefined-outer-name
@set_module('mxnet.numpy')
def expand_dims(a, axis):
"""Expand the shape of an array.
Insert a new axis that will appear at the `axis` position in the expanded array shape.
Parameters
----------
a : ndarray
Input array.
axis : int
Position in the expanded axes where the new axis is placed.
Returns
-------
res : ndarray
Output array. The number of dimensions is one greater than that of
the input array.
See Also
--------
squeeze : The inverse operation, removing singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([1,2])
>>> x.shape
(2,)
>>> y = np.expand_dims(x, axis=0)
>>> y
array([[1., 2.]])
>>> y.shape
(1, 2)
>>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]
>>> y
array([[1.],
[2.]])
>>> y.shape
(2, 1)
Note that some examples may use None instead of np.newaxis. These are the same objects:
>>> np.newaxis is None
True
"""
return _npi.expand_dims(a, axis)
@set_module('mxnet.numpy')
def tile(A, reps):
r"""
Construct an array by repeating A the number of times given by reps.
If `reps` has length ``d``, the result will have dimension of
``max(d, A.ndim)``.
If ``A.ndim < d``, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1, 3) for 2-D replication,
or shape (1, 1, 3) for 3-D replication. If this is not the desired
behavior, promote `A` to d-dimensions manually before calling this
function.
If ``A.ndim > d``, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2, 3, 4, 5), a `reps` of (2, 2) is treated as
(1, 1, 2, 2).
Parameters
----------
A : ndarray or scalar
An input array or a scalar to repeat.
reps : a single integer or tuple of integers
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The tiled output array.
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0., 1., 2., 0., 1., 2.])
>>> np.tile(a, (2, 2))
array([[0., 1., 2., 0., 1., 2.],
[0., 1., 2., 0., 1., 2.]])
>>> np.tile(a, (2, 1, 2))
array([[[0., 1., 2., 0., 1., 2.]],
[[0., 1., 2., 0., 1., 2.]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1., 2., 1., 2.],
[3., 4., 3., 4.]])
>>> np.tile(b, (2, 1))
array([[1., 2.],
[3., 4.],
[1., 2.],
[3., 4.]])
>>> c = np.array([1,2,3,4])
>>> np.tile(c,(4,1))
array([[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.],
[1., 2., 3., 4.]])
Scalar as input:
>>> np.tile(2, 3)
array([2, 2, 2]) # repeating integer `2`
"""
return _mx_nd_np.tile(A, reps)
@set_module('mxnet.numpy')
def trace(a, offset=0, axis1=0, axis2=1, out=None):
"""
Return the sum along diagonals of the array.
If `a` is 2-D, the sum along its diagonal with the given offset
is returned, i.e., the sum of elements ``a[i,i+offset]`` for all i.
If `a` has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-arrays whose traces are returned.
The shape of the resulting array is the same as that of `a` with `axis1`
and `axis2` removed.
Parameters
----------
a : ndarray
Input array, from which the diagonals are taken.
offset : int, optional
Offset of the diagonal from the main diagonal. Can be both positive
and negative. Defaults to 0.
axis1, axis2 : int, optional
Axes to be used as the first and second axis of the 2-D sub-arrays
from which the diagonals should be taken. Defaults are the first two
axes of `a`.
out : ndarray, optional
Array into which the output is placed. It must be of the right shape
and right type to hold the output.
Returns
-------
sum_along_diagonals : ndarray
If `a` is 2-D, the sum along the diagonal is returned. If `a` has
larger dimensions, then an array of sums along diagonals is returned.
Examples
--------
>>> a = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
>>> np.trace(a)
array(3.)
>>> a = np.arange(8).reshape((2, 2, 2))
>>> np.trace(a)
array([6., 8.])
>>> a = np.arange(24).reshape((2, 2, 2, 3))
>>> np.trace(a).shape
(2, 3)
"""
return _mx_nd_np.trace(a, offset, axis1, axis2, out)
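# A sketch of the `offset` argument: `offset=1` sums the diagonal just above the
# main one (assumes `mxnet.numpy` is imported as `np`; exact float formatting may
# differ):
#
#   >>> a = np.arange(9.).reshape(3, 3)
#   >>> np.trace(a, offset=1)   # a[0, 1] + a[1, 2] = 1. + 5.
#   array(6.)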
@set_module('mxnet.numpy')
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
Parameters
----------
a : ndarray
Input array.
axes : list of ints, optional
By default, reverse the dimensions,
otherwise permute the axes according to the values given.
Returns
-------
p : ndarray
a with its axes permuted.
.. note::
This function differs from the original `numpy.transpose
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html>`_ in
the following way(s):
* only ndarray is accepted as valid input, python iterables are not supported
* the operator always returns an `ndarray` that does not share the memory with the input
Examples
--------
>>> x = np.arange(4).reshape((2,2))
>>> x
array([[0., 1.],
[2., 3.]])
>>> np.transpose(x)
array([[0., 2.],
[1., 3.]])
>>> x = np.ones((1, 2, 3))
>>> np.transpose(x, (1, 0, 2)).shape
(2, 1, 3)
"""
return _mx_nd_np.transpose(a, axes)
@set_module('mxnet.numpy')
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
Parameters
----------
a : array_like
Input array.
repeats : int
The number of repetitions for each element.
axis : int, optional
The axis along which to repeat values. By default, use the
flattened input array, and return a flat output array.
Returns
-------
repeated_array : ndarray
Output array which has the same shape as `a`, except along
the given axis.
See Also
--------
tile : Tile an array.
Examples
--------
>>> np.repeat(3, 4)
array([3, 3, 3, 3])
>>> x = np.array([[1,2],[3,4]])
>>> np.repeat(x, 2)
array([1, 1, 2, 2, 3, 3, 4, 4])
>>> np.repeat(x, 3, axis=1)
array([[1, 1, 1, 2, 2, 2],
[3, 3, 3, 4, 4, 4]])
>>> np.repeat(x, [1, 2], axis=0)
array([[1, 2],
[3, 4],
[3, 4]])
"""
return _mx_nd_np.repeat(a, repeats, axis)
@set_module('mxnet.numpy')
def tril(m, k=0):
r"""
Lower triangle of an array.
Return a copy of an array with elements above the `k`-th diagonal zeroed.
Parameters
----------
m : ndarray, shape (M, N)
Input array.
k : int, optional
Diagonal above which to zero elements. `k = 0` (the default) is the
main diagonal, `k < 0` is below it and `k > 0` is above.
Returns
-------
tril : ndarray, shape (M, N)
Lower triangle of `m`, of same shape and data-type as `m`.
See Also
--------
triu : same thing, only for the upper triangle
Examples
--------
>>> a = np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]])
>>> np.tril(a, -1)
array([[ 0., 0., 0.],
[ 4., 0., 0.],
[ 7., 8., 0.],
[10., 11., 12.]])
"""
return _mx_nd_np.tril(m, k)
@set_module('mxnet.numpy')
def tri(N, M=None, k=0, dtype=None, ctx=None): # pylint: disable=redefined-outer-name
r"""
An array with ones at and below the given diagonal and zeros elsewhere.
Parameters
----------
N : int
Number of rows in the array.
M : int, optional
Number of columns in the array.
By default, `M` is taken equal to `N`.
k : int, optional
The sub-diagonal at and below which the array is filled.
`k` = 0 is the main diagonal, while `k` < 0 is below it,
and `k` > 0 is above. The default is 0.
dtype : dtype, optional
Data type of the returned array. The default is float.
Returns
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
>>> np.tri(3, 5, 2, dtype=int)
array([[1, 1, 1, 0, 0],
[1, 1, 1, 1, 0],
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
array([[0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0.],
[1., 1., 0., 0., 0.]])
"""
return _mx_nd_np.tri(N, M, k, dtype, ctx)
@set_module('mxnet.numpy')
def triu_indices(n, k=0, m=None, ctx=None): # pylint: disable=redefined-outer-name
r"""
Return the indices for the upper-triangle of an (n, m) array.
Parameters
----------
n : int
The size of the arrays for which the returned indices will
be valid.
k : int, optional
Diagonal offset (see `triu` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple, shape(2) of ndarrays, shape(`n`)
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array. Can be used
to slice a ndarray of shape(`n`, `n`).
See also
--------
tril_indices : similar function, for lower-triangular.
mask_indices : generic function accepting an arbitrary mask function.
triu, tril
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
upper triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> iu1 = np.triu_indices(4)
>>> iu2 = np.triu_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[iu1]
array([ 0, 1, 2, ..., 10, 11, 15])
And for assigning values:
>>> a[iu1] = -1
>>> a
array([[-1, -1, -1, -1],
[ 4, -1, -1, -1],
[ 8, 9, -1, -1],
[12, 13, 14, -1]])
These cover only a small part of the whole array (two diagonals right
of the main one):
>>> a[iu2] = -10
>>> a
array([[ -1, -1, -10, -10],
[ 4, -1, -1, -10],
[ 8, 9, -1, -1],
[ 12, 13, 14, -1]])
"""
return _mx_nd_np.triu_indices(n, k, m, ctx)
@set_module('mxnet.numpy')
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
See `triu_indices` for full details.
Parameters
----------
arr : ndarray, shape(N, N)
The indices will be valid for square arrays.
k : int, optional
Diagonal offset (see `triu` for details).
Returns
-------
triu_indices_from : tuple, shape(2) of ndarray, shape(N)
Indices for the upper-triangle of `arr`.
See Also
--------
triu_indices, triu
"""
return _mx_nd_np.triu_indices_from(arr, k)
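# A usage sketch for `triu_indices_from`, which derives the size from `arr`
# (valid for square arrays; assumes `mxnet.numpy` is imported as `np`; exact
# float formatting may differ):
#
#   >>> a = np.arange(9.).reshape(3, 3)
#   >>> iu = np.triu_indices_from(a)
#   >>> a[iu]
#   array([0., 1., 2., 4., 5., 8.])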
@set_module('mxnet.numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
Parameters
----------
n : int
The row dimension of the arrays for which the returned
indices will be valid.
k : int, optional
Diagonal offset (see `tril` for details).
m : int, optional
.. versionadded:: 1.9.0
The column dimension of the arrays for which the returned
arrays will be valid.
By default `m` is taken equal to `n`.
Returns
-------
inds : tuple of arrays
The indices for the triangle. The returned tuple contains two arrays,
each with the indices along one dimension of the array.
See also
--------
triu_indices : similar function, for upper-triangular.
mask_indices : generic function accepting an arbitrary mask function.
tril, triu
Examples
--------
Compute two different sets of indices to access 4x4 arrays, one for the
lower triangular part starting at the main diagonal, and one starting two
diagonals further right:
>>> il1 = np.tril_indices(4)
>>> il2 = np.tril_indices(4, 2)
Here is how they can be used with a sample array:
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
Both for indexing:
>>> a[il1]
array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
And for assigning values:
>>> a[il1] = -1
>>> a
array([[-1, 1, 2, 3],
[-1, -1, 6, 7],
[-1, -1, -1, 11],
[-1, -1, -1, -1]])
These cover almost the whole array (two diagonals right of the main one):
>>> a[il2] = -10
>>> a
array([[-10, -10, -10, 3],
[-10, -10, -10, -10],
[-10, -10, -10, -10],
[-10, -10, -10, -10]])
"""
if m is None:
m = n
return _mx_nd_np.tril_indices(n, k, m)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def triu(m, k=0):
r"""
Upper triangle of an array.
Return a copy of a matrix with the elements below the `k`-th diagonal
zeroed.
Please refer to the documentation for `tril` for further details.
See Also
--------
tril : lower triangle of an array
Examples
--------
>>> np.triu(np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]), -1)
array([[ 1, 2, 3],
[ 4, 5, 6],
[ 0, 8, 9],
[ 0, 0, 12]])
"""
return _mx_nd_np.triu(m, k)
@set_module('mxnet.numpy')
def arange(start, stop=None, step=1, dtype=None, ctx=None):
"""Return evenly spaced values within a given interval.
Values are generated within the half-open interval ``[start, stop)``
(in other words, the interval including `start` but excluding `stop`).
For integer arguments the function is equivalent to the Python built-in
`range` function, but returns an ndarray rather than a list.
Parameters
----------
start : number, optional
Start of interval. The interval includes this value. The default
start value is 0.
stop : number
End of interval. The interval does not include this value, except
in some cases where `step` is not an integer and floating point
round-off affects the length of `out`.
step : number, optional
Spacing between values. For any output `out`, this is the distance
between two adjacent values, ``out[i+1] - out[i]``. The default
step size is 1. If `step` is specified as a position argument,
`start` must also be given.
dtype : dtype
The type of the output array.
Default dtype can be set to be consistent with official numpy by `npx.set_np(dtype=True)`.
* When npx.is_np_default_dtype() returns False, default dtype is float32;
* When npx.is_np_default_dtype() returns True, default dtype is int64.
Returns
-------
arange : ndarray
Array of evenly spaced values.
For floating point arguments, the length of the result is
``ceil((stop - start)/step)``. Because of floating point overflow,
this rule may result in the last element of `out` being greater
than `stop`.
Examples
--------
>>> np.arange(3)
array([0., 1., 2.])
>>> np.arange(3.0)
array([0., 1., 2.])
>>> np.arange(3,7)
array([3., 4., 5., 6.])
>>> np.arange(3,7,2)
array([3., 5.])
>>> np.arange(3).dtype
dtype('float32')
>>> npx.set_np(dtype=True)
>>> np.arange(3).dtype
dtype('int64')
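The length rule for non-integer steps can be checked directly; this is an illustrative sketch:
>>> len(np.arange(0, 5, 1.5)) # ceil((5 - 0) / 1.5) == 4
4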
"""
return _mx_nd_np.arange(start, stop, step, dtype, ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example,
``[2, 3]`` would, for ``axis=0``, result in
* ary[:2]
* ary[2:3]
* ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Raises
------
ValueError
If `indices_or_sections` is given as an integer, but
a split does not result in equal division.
See Also
--------
hsplit : Split array into multiple sub-arrays horizontally (column-wise).
vsplit : Split array into multiple sub-arrays vertically (row wise).
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
concatenate : Join a sequence of arrays along an existing axis.
stack : Join a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise).
vstack : Stack arrays in sequence vertically (row wise).
dstack : Stack arrays in sequence depth wise (along third dimension).
Examples
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.split(x, [3, 5, 6, 8])
[array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
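Splitting 8 elements into 3 equal sections is not possible, so ``np.split(np.arange(8.0), 3)`` raises ``ValueError``; `array_split` handles the uneven case (illustrative):
>>> np.array_split(np.arange(8.0), 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]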
"""
return _mx_nd_np.split(ary, indices_or_sections, axis=axis)
@set_module('mxnet.numpy')
def array_split(ary, indices_or_sections, axis=0):
"""Split an array into multiple sub-arrays.
If `indices_or_sections` is an integer, N, the array will be divided
into N sub-arrays along `axis`. Unlike `split`, an equal division is not
required: for an array of length l that should be split into n sections,
it returns l % n sub-arrays of size l//n + 1 and the rest of size l//n.
If `indices_or_sections` is a 1-D array of sorted integers, the entries
indicate where along `axis` the array is split. For example, ``[2, 3]``
would, for ``axis=0``, result in
* ary[:2]
* ary[2:3]
* ary[3:]
If an index exceeds the dimension of the array along `axis`,
an empty sub-array is returned correspondingly.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
Parameter used to determine the number and size of the sub-arrays.
axis : int, optional
The axis along which to split, default is 0.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
Examples
--------
>>> x = np.arange(9.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> np.array_split(x, [3, 5, 6, 8])
[array([0., 1., 2.]), array([3., 4.]), array([5.]), array([6., 7.]), array([])]
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
>>> x = np.arange(7.0)
>>> np.array_split(x, 3)
[array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
"""
return _mx_nd_np.array_split(ary, indices_or_sections, axis=axis)
@set_module('mxnet.numpy')
def vsplit(ary, indices_or_sections):
r"""Split an array into multiple sub-arrays vertically (row-wise).
``vsplit`` is equivalent to ``split`` with `axis=0` (default): the array is always split
along the first axis regardless of the array dimension.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 0. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 0 the array is split. For example, ``[2, 3]`` would result in
* ary[:2]
* ary[2:3]
* ary[3:]
If an index exceeds the dimension of the array along axis 0, an error will be thrown.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
.. note::
This function differs from the original `numpy.vsplit
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.vsplit.html>`_ in
the following aspects:
* Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list.
* In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 0,
an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[ 12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
[array([[0., 1., 2., 3.],
[4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])]
>>> # With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.vsplit(x, 2)
[array([[[0., 1.],
[2., 3.]]]), array([[[4., 5.],
[6., 7.]]])]
"""
return _mx_nd_np.vsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def dsplit(ary, indices_or_sections):
r"""
Split array into multiple sub-arrays along the 3rd axis (depth).
Please refer to the `split` documentation. `dsplit` is equivalent
to `split` with ``axis=2``, the array is always split along the third
axis provided the array dimension is greater than or equal to 3.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int or 1-D Python tuple, list or set.
If `indices_or_sections` is an integer, N, the array will be divided into N equal arrays
along axis 2. If such a split is not possible, an error is raised.
If `indices_or_sections` is a 1-D array of sorted integers, the entries indicate where
along axis 2 the array is split. For example, ``[2, 3]`` would result in
* ary[:, :, :2]
* ary[:, :, 2:3]
* ary[:, :, 3:]
If an index exceeds the dimension of the array along axis 2, an error will be thrown.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
.. note::
This function differs from the original `numpy.dsplit
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.dsplit.html>`_ in
the following aspects:
* Currently parameter ``indices_or_sections`` does not support ndarray, but supports scalar,
tuple and list.
* In ``indices_or_sections``, if an index exceeds the dimension of the array along axis 2,
an error will be thrown.
Examples
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
array([[[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.]],
[[ 8., 9., 10., 11.],
[12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
[array([[[ 0., 1.],
[ 4., 5.]],
[[ 8., 9.],
[12., 13.]]]), array([[[ 2., 3.],
[ 6., 7.]],
[[10., 11.],
[14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
[array([[[ 0., 1., 2.],
[ 4., 5., 6.]],
[[ 8., 9., 10.],
[12., 13., 14.]]]),
array([[[ 3.],
[ 7.]],
[[11.],
[15.]]]),
array([], shape=(2, 2, 0), dtype=float64)]
"""
return _mx_nd_np.dsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def concatenate(seq, axis=0, out=None):
"""Join a sequence of arrays along an existing axis.
Parameters
----------
seq : sequence of ndarrays
The arrays must have the same shape, except in the dimension
corresponding to `axis` (the first, by default).
axis : int, optional
The axis along which the arrays will be joined. If axis is None,
arrays are flattened before use. Default is 0.
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what concatenate would have returned if no
out argument were specified.
Returns
-------
res : ndarray
The concatenated array.
See Also
--------
split : Split array into a list of multiple sub-arrays of equal size.
hsplit : Split array into multiple sub-arrays horizontally (column wise)
vsplit : Split array into multiple sub-arrays vertically (row wise)
dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
stack : Stack a sequence of arrays along a new axis.
hstack : Stack arrays in sequence horizontally (column wise)
vstack : Stack arrays in sequence vertically (row wise)
dstack : Stack arrays in sequence depth wise (along third dimension)
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> b = np.array([[5, 6]])
>>> np.concatenate((a, b), axis=0)
array([[1., 2.],
[3., 4.],
[5., 6.]])
>>> np.concatenate((a, b.T), axis=1)
array([[1., 2., 5.],
[3., 4., 6.]])
>>> np.concatenate((a, b), axis=None)
array([1., 2., 3., 4., 5., 6.])
"""
return _mx_nd_np.concatenate(seq, axis=axis, out=out)
@set_module('mxnet.numpy')
def append(arr, values, axis=None): # pylint: disable=redefined-outer-name
"""
Append values to the end of an array.
Parameters
----------
arr : ndarray
Values are appended to a copy of this array.
values : ndarray
These values are appended to a copy of `arr`. It must be of the
correct shape (the same shape as `arr`, excluding `axis`). If
`axis` is not specified, `values` can be any shape and will be
flattened before use.
axis : int, optional
The axis along which `values` are appended. If `axis` is not
given, both `arr` and `values` are flattened before use.
Returns
-------
append : ndarray
A copy of `arr` with `values` appended to `axis`. Note that
`append` does not occur in-place: a new array is allocated and
filled. If `axis` is None, `out` is a flattened array.
Examples
--------
>>> np.append(np.array([1, 2, 3]), np.array([[4, 5, 6],[7, 8, 9]]))
array([1., 2., 3., 4., 5., 6., 7., 8., 9.])
When `axis` is specified, `values` must have the correct shape.
>>> np.append(np.array([[1, 2, 3], [4, 5, 6]]), np.array([[7, 8, 9]]), axis=0)
array([[1., 2., 3.],
[4., 5., 6.],
[7., 8., 9.]])
"""
return _mx_nd_np.append(arr, values, axis=axis)
@set_module('mxnet.numpy')
def stack(arrays, axis=0, out=None):
"""Join a sequence of arrays along a new axis.
The axis parameter specifies the index of the new axis in the dimensions of the result.
For example, if `axis=0` it will be the first dimension and if `axis=-1` it will be the last dimension.
Parameters
----------
arrays : sequence of array_like
Each array must have the same shape.
axis : int, optional
The axis in the result array along which the input arrays are stacked.
out : ndarray, optional
If provided, the destination to place the result. The shape must be correct,
matching that of what stack would have returned if no out argument were specified.
Returns
-------
stacked : ndarray
The stacked array has one more dimension than the input arrays.
See Also
--------
concatenate : Join a sequence of arrays along an existing axis.
split : Split array into a list of multiple sub-arrays of equal size.
Examples
--------
>>> arrays = [np.random.rand(3, 4) for _ in range(10)]
>>> np.stack(arrays, axis=0).shape
(10, 3, 4)
>>> np.stack(arrays, axis=1).shape
(3, 10, 4)
>>> np.stack(arrays, axis=2).shape
(3, 4, 10)
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.stack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> np.stack((a, b), axis=-1)
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _mx_nd_np.stack(arrays, axis=axis, out=out)
@set_module('mxnet.numpy')
def vstack(arrays, out=None):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
arrays : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.vstack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.vstack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
return _mx_nd_np.vstack(arrays)
@set_module('mxnet.numpy')
def row_stack(arrays):
r"""Stack arrays in sequence vertically (row wise).
This is equivalent to concatenation along the first axis after 1-D arrays
of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
`vsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate` and `stack`
provide more general stacking and concatenation operations.
Parameters
----------
arrays : sequence of ndarrays
The arrays must have the same shape along all but the first axis.
1-D arrays must have the same length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 2-D.
Examples
--------
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.row_stack((a, b))
array([[1., 2., 3.],
[2., 3., 4.]])
>>> a = np.array([[1], [2], [3]])
>>> b = np.array([[2], [3], [4]])
>>> np.row_stack((a, b))
array([[1.],
[2.],
[3.],
[2.],
[3.],
[4.]])
"""
return _mx_nd_np.row_stack(arrays)
@set_module('mxnet.numpy')
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
Take a sequence of 1-D arrays and stack them as columns
to make a single 2-D array. 2-D arrays are stacked as-is,
just like with `hstack`. 1-D arrays are turned into 2-D columns
first.
Parameters
----------
tup : sequence of 1-D or 2-D arrays.
Arrays to stack. All of them must have the same first dimension.
Returns
--------
stacked : 2-D array
The array formed by stacking the given arrays.
See Also
--------
stack, hstack, vstack, concatenate
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.column_stack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _mx_nd_np.column_stack(tup)
@set_module('mxnet.numpy')
def hstack(arrays):
"""
Stack arrays in sequence horizontally (column wise).
This is equivalent to concatenation along the second axis,
except for 1-D arrays where it concatenates along the first axis.
Rebuilds arrays divided by hsplit.
This function makes most sense for arrays with up to 3 dimensions.
For instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions concatenate,
stack and block provide more general stacking and concatenation operations.
Parameters
----------
arrays : sequence of ndarrays
The arrays must have the same shape along all but the second axis, except 1-D arrays which can be any length.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays.
Examples
--------
>>> from mxnet import np,npx
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.hstack((a,b))
array([1., 2., 3., 2., 3., 4.])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.hstack((a,b))
array([[1., 2.],
[2., 3.],
[3., 4.]])
"""
return _mx_nd_np.hstack(arrays)
@set_module('mxnet.numpy')
def dstack(arrays):
"""
Stack arrays in sequence depth wise (along third axis).
This is equivalent to concatenation along the third axis after 2-D arrays
of shape `(M,N)` have been reshaped to `(M,N,1)` and 1-D arrays of shape
`(N,)` have been reshaped to `(1,N,1)`. Rebuilds arrays divided by
`dsplit`.
This function makes most sense for arrays with up to 3 dimensions. For
instance, for pixel-data with a height (first axis), width (second axis),
and r/g/b channels (third axis). The functions `concatenate`, `stack` and
`block` provide more general stacking and concatenation operations.
Parameters
----------
arrays : sequence of arrays
The arrays must have the same shape along all but the third axis.
1-D or 2-D arrays must have the same shape.
Returns
-------
stacked : ndarray
The array formed by stacking the given arrays, will be at least 3-D.
Examples
--------
>>> a = np.array((1,2,3))
>>> b = np.array((2,3,4))
>>> np.dstack((a,b))
array([[[1, 2],
[2, 3],
[3, 4]]])
>>> a = np.array([[1],[2],[3]])
>>> b = np.array([[2],[3],[4]])
>>> np.dstack((a,b))
array([[[1, 2]],
[[2, 3]],
[[3, 4]]])
"""
return _npi.dstack(*arrays)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def maximum(x1, x2, out=None, **kwargs):
"""
Returns element-wise maximum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.maximum(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([2., 5., 4.])
>>> np.maximum(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[1. , 2. ],
[0.5, 2. ]])
"""
return _mx_nd_np.maximum(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmax(x1, x2, out=None, **kwargs):
"""
Returns element-wise maximum of the input arrays with broadcasting. (Ignores NaNs)
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The maximum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmax(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([2., 5., 4.])
>>> np.fmax(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[1. , 2. ],
[0.5, 2. ]])
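Where one operand is NaN and the other is not, the non-NaN value is returned (illustrative; exact output formatting may vary):
>>> np.fmax(np.array([np.nan, 2, 3]), np.array([1, np.nan, 1]))
array([1., 2., 3.])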
"""
return _mx_nd_np.fmax(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def minimum(x1, x2, out=None, **kwargs):
"""
Returns element-wise minimum of the input arrays with broadcasting.
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The minimum of x1 and x2, element-wise. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.minimum(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([1., 3., 2.])
>>> np.minimum(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[0.5, 0. ],
[0. , 1. ]])
"""
return _mx_nd_np.minimum(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def fmin(x1, x2, out=None, **kwargs):
"""
Returns element-wise minimum of the input arrays with broadcasting. (Ignores NaNs)
Parameters
----------
x1, x2 : scalar or mxnet.numpy.ndarray
The arrays holding the elements to be compared. They must have the same shape,
or shapes that can be broadcast to a single shape.
Returns
-------
out : mxnet.numpy.ndarray or scalar
The minimum of x1 and x2, element-wise, ignoring NaNs. This is a scalar if both x1 and x2 are scalars.
Examples
--------
>>> np.fmin(np.array([2, 3, 4]), np.array([1, 5, 2]))
array([1., 3., 2.])
>>> np.fmin(np.eye(2), np.array([0.5, 2])) # broadcasting
array([[0.5, 0. ],
[0. , 1. ]])
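Where one operand is NaN and the other is not, the non-NaN value is returned (illustrative; exact output formatting may vary):
>>> np.fmin(np.array([np.nan, 2, 3]), np.array([1, np.nan, 1]))
array([1., 2., 1.])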
"""
return _mx_nd_np.fmin(x1, x2, out=out)
@set_module('mxnet.numpy')
def max(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : ndarray
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.max(a) # Maximum of the flattened array
array(3.)
>>> np.max(a, axis=0) # Maxima along the first axis
array([2., 3.])
>>> np.max(a, axis=1) # Maxima along the second axis
array([1., 3.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.max(b)
array(4.)
"""
return _mx_nd_np.max(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def min(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.min(a) # Minimum of the flattened array
array(0.)
>>> np.min(a, axis=0) # Minima along the first axis
array([0., 1.])
>>> np.min(a, axis=1) # Minima along the second axis
array([0., 2.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.min(b)
array(0.) # nan will be ignored
"""
return _mx_nd_np.min(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def swapaxes(a, axis1, axis2):
"""Interchange two axes of an array.
Parameters
----------
a : ndarray
Input array.
axis1 : int
First axis.
axis2 : int
Second axis.
Returns
-------
a_swapped : ndarray
Swapped array. This is always a copy of the input array.
Examples
--------
>>> x = np.array([[1,2,3]])
>>> np.swapaxes(x,0,1)
array([[1.],
[2.],
[3.]])
>>> x = np.array([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x
array([[[0., 1.],
[2., 3.]],
[[4., 5.],
[6., 7.]]])
>>> np.swapaxes(x,0,2)
array([[[0., 4.],
[2., 6.]],
[[1., 5.],
[3., 7.]]])
"""
return _npi.swapaxes(a, dim1=axis1, dim2=axis2)
@set_module('mxnet.numpy')
def clip(a, a_min, a_max, out=None):
"""clip(a, a_min, a_max, out=None)
Clip (limit) the values in an array.
Given an interval, values outside the interval are clipped to
the interval edges. For example, if an interval of ``[0, 1]``
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
Parameters
----------
a : ndarray
Array containing elements to clip.
a_min : scalar or `None`
Minimum value. If `None`, clipping is not performed on lower
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
a_max : scalar or `None`
Maximum value. If `None`, clipping is not performed on upper
interval edge. Not more than one of `a_min` and `a_max` may be
`None`.
out : ndarray, optional
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
Returns
-------
clipped_array : ndarray
An array with the elements of `a`, but where values
< `a_min` are replaced with `a_min`, and those > `a_max`
with `a_max`.
Notes
-----
array_like `a_min` and `a_max` are not supported.
Examples
--------
>>> a = np.arange(10)
>>> np.clip(a, 1, 8)
array([1., 1., 2., 3., 4., 5., 6., 7., 8., 8.])
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.clip(a, 3, 6, out=a)
array([3., 3., 3., 3., 4., 5., 6., 6., 6., 6.])
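A Python scalar input falls back to native NumPy and a Python scalar is returned (illustrative):
>>> np.clip(10, 1, 8)
8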
"""
from numbers import Number
if isinstance(a, Number):
# In case input is a scalar, the computation would fall back to native numpy.
# The value returned would be a python scalar.
return _np.clip(a, a_min, a_max, out=None)
return _mx_nd_np.clip(a, a_min, a_max, out=out)
@set_module('mxnet.numpy')
def argmax(a, axis=None, out=None):
r"""
Returns the indices of the maximum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
.. note::
In case of multiple occurrences of the maximum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmax
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmax.html>`_ in
the following aspects:
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmax(a)
array(5.)
>>> np.argmax(a, axis=0)
array([1., 1., 1.])
>>> np.argmax(a, axis=1)
array([2., 2.])
>>> b = np.arange(6)
>>> b[1] = 5
>>> b
array([0., 5., 2., 3., 4., 5.])
>>> np.argmax(b) # Only the first occurrence is returned.
array(1.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmax(a, axis=1, out=b)
array([2., 2.])
>>> b
array([2., 2.])
"""
return _mx_nd_np.argmax(a, axis, out)
@set_module('mxnet.numpy')
def argmin(a, axis=None, out=None):
r"""
Returns the indices of the minimum values along an axis.
Parameters
----------
a : ndarray
Input array. Only support ndarrays of dtype `float16`, `float32`, and `float64`.
axis : int, optional
By default, the index is into the flattened array, otherwise
along the specified axis.
out : ndarray or None, optional
If provided, the result will be inserted into this array. It should
be of the appropriate shape and dtype.
Returns
-------
index_array : ndarray of indices whose dtype is same as the input ndarray.
Array of indices into the array. It has the same shape as `a.shape`
with the dimension along `axis` removed.
.. note::
In case of multiple occurrences of the minimum values, the indices
corresponding to the first occurrence are returned.
This function differs from the original `numpy.argmin
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.argmin.html>`_ in
the following aspects:
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> a = np.arange(6).reshape(2,3) + 10
>>> a
array([[10., 11., 12.],
[13., 14., 15.]])
>>> np.argmin(a)
array(0.)
>>> np.argmin(a, axis=0)
array([0., 0., 0.])
>>> np.argmin(a, axis=1)
array([0., 0.])
>>> b = np.arange(6)
>>> b[2] = 0
>>> b
array([0., 1., 0., 3., 4., 5.])
>>> np.argmin(b) # Only the first occurrence is returned.
array(0.)
Specify ``out`` ndarray:
>>> a = np.arange(6).reshape(2,3) + 10
>>> b = np.zeros((2,))
>>> np.argmin(a, axis=1, out=b)
array([0., 0.])
>>> b
array([0., 0.])
"""
return _mx_nd_np.argmin(a, axis, out)
@set_module('mxnet.numpy')
def amax(a, axis=None, out=None, keepdims=False):
"""
Return the maximum of an array or maximum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
max : ndarray
Maximum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
min :
The minimum value of an array along a given axis, ignoring any nan.
maximum :
Element-wise maximum of two arrays, ignoring any nan.
argmax :
Return the indices of the maximum values.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `max` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``maximum(a[0], a[1])`` is faster than
``max(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.max(a) # Maximum of the flattened array
array(3.)
>>> np.max(a, axis=0) # Maxima along the first axis
array([2., 3.])
>>> np.max(a, axis=1) # Maxima along the second axis
array([1., 3.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.max(b)
array(4.)
"""
return _mx_nd_np.amax(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def amin(a, axis=None, out=None, keepdims=False):
"""
Return the minimum of an array or minimum along an axis.
Parameters
----------
a : ndarray
Input data.
axis : int, optional
Axis along which to operate. By default, flattened input is used.
out : ndarray, optional
Alternative output array in which to place the result. Must
be of the same shape and buffer length as the expected output.
See `doc.ufuncs` (Section "Output arguments") for more details.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
min : ndarray
Minimum of `a`. If `axis` is None, the result is an array of dimension 1.
If `axis` is given, the result is an array of dimension
``a.ndim - 1``.
See Also
--------
max :
The maximum value of an array along a given axis, ignoring any nan.
minimum :
Element-wise minimum of two arrays, ignoring any nan.
Notes
-----
NaN in the original `numpy` is denoted as nan and will be ignored.
Don't use `min` for element-wise comparison of 2 arrays; when
``a.shape[0]`` is 2, ``minimum(a[0], a[1])`` is faster than
``min(a, axis=0)``.
Examples
--------
>>> a = np.arange(4).reshape((2,2))
>>> a
array([[0., 1.],
[2., 3.]])
>>> np.min(a) # Minimum of the flattened array
array(0.)
>>> np.min(a, axis=0) # Minima along the first axis
array([0., 1.])
>>> np.min(a, axis=1) # Minima along the second axis
array([0., 2.])
>>> b = np.arange(5, dtype=np.float32)
>>> b[2] = np.nan
>>> np.min(b)
array(0.) # nan will be ignored
"""
return _mx_nd_np.amin(a, axis=axis, out=out, keepdims=keepdims)
@set_module('mxnet.numpy')
def average(a, axis=None, weights=None, returned=False, out=None):
"""
Compute the weighted average along the specified axis.
Parameters
--------
a : ndarray
Array containing data to be averaged.
axis : None or int or tuple of ints, optional
Axis or axes along which to average a.
The default, axis=None, will average over
all of the elements of the input array.
If axis is negative it counts from the last to the first axis.
New in version 1.7.0.
If axis is a tuple of ints, averaging is
performed on all of the axes specified in the tuple
instead of a single axis or all the axes as before.
weights : ndarray, optional
An array of weights associated with the values in a; it must have the same dtype as a.
Each value in a contributes to the average according to its associated weight.
The weights array can either be 1-D (in which case its length must be
the size of a along the given axis) or of the same shape as a.
If weights=None, then all data in a are assumed to have a weight equal to one.
The 1-D calculation is: avg = sum(a * weights) / sum(weights)
The only constraint on weights is that sum(weights) must not be 0.
returned : bool, optional
Default is False.
If True, the tuple (average, sum_of_weights) is returned,
otherwise only the average is returned.
If weights=None, sum_of_weights is equivalent to
the number of elements over which the average is taken.
out : ndarray, optional
If provided, the calculation is done into this array.
Returns
--------
retval, [sum_of_weights] : ndarray
Return the average along the specified axis.
When returned is True, return a tuple with the average as the first element
and the sum of the weights as the second element. sum_of_weights is of the same type as retval.
If a is of an integral dtype, the result dtype is the current default dtype
(float32 when npx.is_np_default_dtype() returns False,
float64 when npx.is_np_default_dtype() returns True);
otherwise the result has the same dtype as a.
Raises
--------
MXNetError
* When all weights along axis sum to zero.
* When the length of 1D weights is not the same as the shape of a along axis.
* When given 1D weights, the axis is not specified or is not int.
* When the shape of weights and a differ, but weights are not 1D.
See also
--------
mean
.. note::
This function differs from the original `numpy.average`
<https://numpy.org/devdocs/reference/generated/numpy.average.html>`_ in
the following way(s):
* Does not guarantee the same behavior with numpy when given float16 dtype and overflow happens
* Does not support complex dtype
* The dtypes of a and weights must be the same
* Integral a results in float32 or float64 returned dtype:
* When npx.is_np_default_dtype() returns False, default dtype is float32,
* When npx.is_np_default_dtype() returns True, default dtype is float64;
Examples
--------
>>> data = np.arange(1, 5)
>>> data
array([1., 2., 3., 4.])
>>> np.average(data)
array(2.5)
>>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
array(4.)
>>> data = np.arange(6).reshape((3,2))
>>> data
array([[0., 1.],
[2., 3.],
[4., 5.]])
>>> weights = np.array([0.25, 0.75])
>>> weights
array([0.25, 0.75])
>>> np.average(data, axis=1, weights=weights)
array([0.75, 2.75, 4.75])
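With ``returned=True`` the sum of the weights is returned alongside the average (illustrative; exact formatting may vary):
>>> np.average(data, axis=1, weights=weights, returned=True)
(array([0.75, 2.75, 4.75]), array([1., 1., 1.]))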
"""
return _mx_nd_np.average(a, axis=axis, weights=weights, returned=returned, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def mean(a, axis=None, dtype=None, out=None, keepdims=False): # pylint: disable=arguments-differ
"""
Compute the arithmetic mean along the specified axis.
Returns the average of the array elements.
The average is taken over the flattened array by default, otherwise over the specified axis.
Parameters
----------
a : ndarray
ndarray containing numbers whose mean is desired.
axis : None or int or tuple of ints, optional
Axis or axes along which the means are computed. The default is to compute the mean of the flattened array.
If this is a tuple of ints, a mean is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the mean.
For integer inputs, the default is of your current default dtype,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64;
For floating point inputs, it is the same as the input dtype.
out : ndarray, optional
Alternate output array in which to place the result. The default is None; if provided,
it must have the same shape and type as the expected output.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result
as dimensions with size one. With this option, the result will broadcast correctly
against the input array.
If the default value is passed, then keepdims will not be passed through to the mean
method of sub-classes of ndarray, however any non-default value will be. If the sub-class
method does not implement keepdims any exceptions will be raised.
Returns
-------
m : ndarray, see dtype parameter above
If out=None, returns a new array containing the mean values,
otherwise a reference to the output array is returned.
.. note::
This function differs from the original `numpy.mean
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.mean.html>`_ in
the following way(s):
* only ndarray is accepted as valid input, python iterables or scalar is not supported
* default data type for integer input is float32 or float64, which depends on your current default dtype
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.mean(a)
array(2.5)
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0,:] = 1.0
>>> a[1,:] = 0.1
>>> np.mean(a)
array(0.55)
>>> np.mean(a, dtype=np.float64)
array(0.55, dtype=float64)
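Keeping the reduced axis makes the result broadcast against the input (illustrative):
>>> x = np.array([[1, 2], [3, 4]])
>>> np.mean(x, axis=1, keepdims=True)
array([[1.5],
[3.5]])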
"""
return _npi.mean(a, axis=axis, dtype=dtype, keepdims=keepdims, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the standard deviation along the specified axis.
Returns the standard deviation, a measure of the spread of a distribution,
of the array elements. The standard deviation is computed for the
flattened array by default, otherwise over the specified axis.
Parameters
----------
a : array_like
Calculate the standard deviation of these values.
axis : None or int or tuple of ints, optional
Axis or axes along which the standard deviation is computed. The
default is to compute the standard deviation of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a standard deviation is performed over
multiple axes, instead of a single axis or all the axes as before.
dtype : dtype, optional
Type to use in computing the standard deviation. For arrays of
integer type the default is float64, for arrays of float types it is
the same as the array type.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output but the type (of the calculated
values) will be cast if necessary.
ddof : int, optional
Means Delta Degrees of Freedom. The divisor used in calculations
is ``N - ddof``, where ``N`` represents the number of elements.
By default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `std` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
standard_deviation : ndarray, see dtype parameter above.
If `out` is None, return a new array containing the standard deviation,
otherwise return a reference to the output array.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
1.1180339887498949 # may vary
>>> np.std(a, axis=0)
array([1., 1.])
>>> np.std(a, axis=1)
array([0.5, 0.5])
In single precision, std() can be inaccurate:
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.std(a)
array(0.45)
>>> np.std(a, dtype=np.float64)
array(0.45, dtype=float64)
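With ``ddof=1`` the divisor becomes ``N - 1``, giving the sample standard deviation (illustrative):
>>> np.std(np.array([1., 2., 3., 4.]), ddof=1) # sqrt(5/3), may vary
array(1.2909944)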
"""
return _mx_nd_np.std(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
dimensional array, this returns those entries not returned by
`arr[obj]`.
Parameters
----------
arr : ndarray
Input array.
obj : slice, int or ndarray of ints
Indicate indices of sub-arrays to remove along the specified axis.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
Returns
-------
out : ndarray
A copy of `arr` with the elements specified by `obj` removed. Note
that `delete` does not occur in-place. If `axis` is None, `out` is
a flattened array.
Examples
--------
>>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
>>> arr
array([[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, 1, 0)
array([[ 1., 2., 3., 4.],
[ 9., 10., 11., 12.]])
>>> np.delete(arr, slice(None, None, 2), 1)
array([[ 2., 4.],
[ 6., 8.],
[10., 12.]])
>>> np.delete(arr, np.array([1,3,5]), None)
array([ 1., 3., 5., 7., 8., 9., 10., 11., 12.])
>>> np.delete(arr, np.array([1,1,5]), None)
array([ 1., 3., 4., 5., 7., 8., 9., 10., 11., 12.])
"""
return _mx_nd_np.delete(arr, obj, axis=axis)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the variance along the specified axis.
Returns the variance of the array elements, a measure of the spread of a
distribution. The variance is computed for the flattened array by
default, otherwise over the specified axis.
Parameters
----------
a : array_like
Array containing numbers whose variance is desired. If `a` is not an
array, a conversion is attempted.
axis : None or int or tuple of ints, optional
Axis or axes along which the variance is computed. The default is to
compute the variance of the flattened array.
.. versionadded:: 1.7.0
If this is a tuple of ints, a variance is performed over multiple axes,
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance.
For arrays of integer type, the default is of your current default dtype,
When npx.is_np_default_dtype() returns False, default dtype is float32,
When npx.is_np_default_dtype() returns True, default dtype is float64.
For arrays of float types it is the same as the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
the same shape as the expected output, but the type is cast if
necessary.
ddof : int, optional
"Delta Degrees of Freedom": the divisor used in the calculation is
``N - ddof``, where ``N`` represents the number of elements. By
default `ddof` is zero.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `var` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
Returns
-------
variance : ndarray, see dtype parameter above
If ``out=None``, returns a new array containing the variance;
otherwise, a reference to the output array is returned.
Examples
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.var(a)
array(1.25)
>>> np.var(a, axis=0)
array([1., 1.])
>>> np.var(a, axis=1)
array([0.25, 0.25])
>>> a = np.zeros((2, 512*512), dtype=np.float32)
>>> a[0, :] = 1.0
>>> a[1, :] = 0.1
>>> np.var(a)
array(0.2025)
>>> np.var(a, dtype=np.float64)
array(0.2025, dtype=float64)
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
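Similarly, ``ddof=1`` uses ``N - 1`` as the divisor and gives the sample variance (illustrative):
>>> np.var(np.array([1., 2., 3., 4.]), ddof=1) # 5/3, may vary
array(1.6666666)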
"""
return _mx_nd_np.var(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def indices(dimensions, dtype=None, ctx=None):
"""Return an array representing the indices of a grid.
Compute an array where the subarrays contain index values 0,1,...
varying only along the corresponding axis.
Parameters
----------
dimensions : sequence of ints
The shape of the grid.
dtype : data-type, optional
The desired data-type for the array. Default is `int64`.
ctx : device context, optional
Device context on which the memory is allocated. Default is
`mxnet.context.current_context()`.
Returns
-------
grid : ndarray
The array of grid indices,
``grid.shape = (len(dimensions),) + tuple(dimensions)``.
Notes
-----
The output shape is obtained by prepending the number of dimensions
in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
``(r0, ..., rN-1)`` of length ``N``, the output shape is
``(N,r0,...,rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
grid[k,i0,i1,...,iN-1] = ik
Examples
--------
>>> grid = np.indices((2, 3))
>>> grid.shape
(2, 2, 3)
>>> grid[0] # row indices
array([[0, 0, 0],
[1, 1, 1]], dtype=int64)
>>> grid[1] # column indices
array([[0, 1, 2],
[0, 1, 2]], dtype=int64)
The indices can be used as an index into an array.
>>> x = np.arange(20).reshape(5, 4)
>>> row, col = np.indices((2, 3))
>>> x[row, col]
array([[0., 1., 2.],
[4., 5., 6.]])
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
"""
return _mx_nd_np.indices(dimensions=dimensions, dtype=dtype, ctx=ctx)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
@wrap_np_binary_func
def copysign(x1, x2, out=None, **kwargs):
r"""
Change the sign of x1 to that of x2, element-wise.
If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : ndarray or scalar
Values to change the sign of.
x2 : ndarray or scalar
The sign of `x2` is copied to `x1`.
out : ndarray or None, optional
A location into which the result is stored. It must be of the
right shape and right type to hold the output. If not provided
or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
The values of `x1` with the sign of `x2`.
This is a scalar if both `x1` and `x2` are scalars.
.. note::
This function differs from the original `numpy.copysign
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.copysign.html>`_ in
the following aspects:
* ``where`` param is not supported.
Examples
--------
>>> np.copysign(1.3, -1)
-1.3
>>> 1/np.copysign(0, 1)
inf
>>> 1/np.copysign(0, -1)
-inf
>>> a = np.array([-1, 0, 1])
>>> np.copysign(a, -1.1)
array([-1., -0., -1.])
>>> np.copysign(a, np.arange(3)-1)
array([-1., 0., 1.])
"""
return _mx_nd_np.copysign(x1, x2, out=out)
@set_module('mxnet.numpy')
def ravel(x, order='C'):
r"""
ravel(x)
Return a contiguous flattened array.
A 1-D array, containing the elements of the input, is returned. A copy is
made only if needed.
Parameters
----------
x : ndarray
Input array. The elements in `x` are read in row-major, C-style order and
packed as a 1-D array.
order : `C`, optional
Only row-major, C-style order is supported.
Returns
-------
y : ndarray
y is an array of the same subtype as `x`, with shape ``(x.size,)``.
Note that matrices are special cased for backward compatibility, if `x`
is a matrix, then y is a 1-D ndarray.
.. note::
This function differs from the original `numpy.ravel` in the following aspects:
* Only support row-major, C-style order.
Examples
--------
It is equivalent to ``reshape(x, -1)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
>>> print(np.ravel(x))
[1. 2. 3. 4. 5. 6.]
>>> print(x.reshape(-1))
[1. 2. 3. 4. 5. 6.]
>>> print(np.ravel(x.T))
[1. 4. 2. 5. 3. 6.]
"""
return _mx_nd_np.ravel(x, order)
@set_module('mxnet.numpy')
def unravel_index(indices, shape, order='C'): # pylint: disable=redefined-outer-name
"""
Converts a flat index or array of flat indices into a tuple of coordinate arrays.
Parameters
----------
indices : array_like
An integer array whose elements are indices into the flattened version of an array of dimensions shape.
Before version 1.6.0, this function accepted just one index value.
shape : tuple of ints
The shape of the array to use for unraveling indices.
order : str, optional
Only row-major ('C') order is supported currently.
Returns
-------
unraveled_coords : ndarray
Each row in the ndarray has the same shape as the `indices` array;
each column gives the unravelled coordinates of one index.
Examples
--------
>>> np.unravel_index([22, 41, 37], (7,6))
[[3. 6. 6.]
[4. 5. 1.]]
>>> np.unravel_index(1621, (6,7,8,9))
[3, 1, 4, 1]
"""
return _mx_nd_np.unravel_index(indices, shape, order=order)
@set_module('mxnet.numpy')
def flatnonzero(a):
r"""
Return indices that are non-zero in the flattened version of a.
This is equivalent to np.nonzero(np.ravel(a))[0].
Parameters
----------
a : array_like
Input data.
Returns
-------
res : ndarray
Output array, containing the indices of the elements of `a.ravel()`
that are non-zero.
See Also
--------
nonzero : Return the indices of the non-zero elements of the input array.
ravel : Return a 1-D array containing the elements of the input array.
Examples
--------
>>> x = np.arange(-2, 3)
>>> x
array([-2, -1, 0, 1, 2])
>>> np.flatnonzero(x)
array([0, 1, 3, 4])
Use the indices of the non-zero elements as an index array to extract
these elements:
>>> x.ravel()[np.flatnonzero(x)]
array([-2, -1, 1, 2])
"""
return _mx_nd_np.flatnonzero(a)
@set_module('mxnet.numpy')
def diag_indices_from(arr):
"""
This returns a tuple of indices that can be used to access the main diagonal of an array
a with a.ndim >= 2 dimensions and shape (n, n, ..., n). For a.ndim = 2 this is
the usual diagonal, for a.ndim > 2 this is the set of indices to access
a[i, i, ..., i] for i = [0..n-1].
Parameters
----------
arr : ndarray
Input array for accessing the main diagonal. All dimensions
should have equal length.
Returns
-------
diag : tuple of ndarray
Indices of the main diagonal.
Examples
--------
>>> a = np.arange(16).reshape(4, 4)
>>> a
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> idx = np.diag_indices_from(a)
>>> idx
(array([0, 1, 2, 3]), array([0, 1, 2, 3]))
>>> a[idx] = 100
>>> a
array([[100, 1, 2, 3],
[ 4, 100, 6, 7],
[ 8, 9, 100, 11],
[ 12, 13, 14, 100]])
"""
return _mx_nd_np.diag_indices_from(arr)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def hanning(M, dtype=None, ctx=None):
r"""Return the Hanning window.
The Hanning window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
blackman, hamming
Notes
-----
The Hanning window is defined as
.. math:: w(n) = 0.5 - 0.5cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hanning was named for Julius von Hann, an Austrian meteorologist.
It is also known as the Cosine Bell. Some authors prefer that it be
called a Hann window, to help avoid confusion with the very similar
Hamming window.
Most references to the Hanning window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics",
The University of Alberta Press, 1975, pp. 106-108.
.. [3] Wikipedia, "Window function",
http://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hanning(12)
array([0. , 0.07937324, 0.29229254, 0.5711574 , 0.8274304 ,
0.9797465 , 0.97974646, 0.82743025, 0.5711573 , 0.29229245,
0.07937312, 0. ])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hanning(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
Text(0.5, 1.0, 'Hann window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
return _mx_nd_np.hanning(M, dtype=dtype, ctx=ctx)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def hamming(M, dtype=None, ctx=None):
r"""Return the hamming window.
The hamming window is a taper formed by using a weighted cosine.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray, shape(M,)
The window, with the maximum value normalized to one (the value
one appears only if `M` is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
blackman, hanning
Notes
-----
The Hamming window is defined as
.. math:: w(n) = 0.54 - 0.46cos\left(\frac{2\pi{n}}{M-1}\right)
\qquad 0 \leq n \leq M-1
The Hamming was named for R. W. Hamming, an associate of J. W. Tukey
and is described in Blackman and Tukey. It was recommended for
smoothing the truncated autocovariance function in the time domain.
Most references to the Hamming window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function.
References
----------
.. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power
spectra, Dover Publications, New York.
.. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The
University of Alberta Press, 1975, pp. 109-110.
.. [3] Wikipedia, "Window function",
https://en.wikipedia.org/wiki/Window_function
.. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling,
"Numerical Recipes", Cambridge University Press, 1986, page 425.
Examples
--------
>>> np.hamming(12)
array([0.08000001, 0.15302339, 0.34890914, 0.6054648 , 0.841236 ,
0.9813669 , 0.9813668 , 0.8412359 , 0.6054647 , 0.34890908,
0.15302327, 0.08000001])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.hamming(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("hamming window")
Text(0.5, 1.0, 'hamming window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
"""
return _mx_nd_np.hamming(M, dtype=dtype, ctx=ctx)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def blackman(M, dtype=None, ctx=None):
r"""Return the Blackman window.
The Blackman window is a taper formed by using the first three
terms of a summation of cosines. It was designed to have close to the
minimal leakage possible. It is close to optimal, only slightly worse
than a Kaiser window.
Parameters
----------
M : int
Number of points in the output window. If zero or less, an
empty array is returned.
ctx : Context, optional
An optional device context (default is the current default context).
Returns
-------
out : ndarray
The window, with the maximum value normalized to one (the value one
appears only if the number of samples is odd).
When npx.is_np_default_dtype() returns False, default dtype is float32;
When npx.is_np_default_dtype() returns True, default dtype is float64.
Note that you need to select numpy.float32 or float64 in this operator.
See Also
--------
hamming, hanning
Notes
-----
The Blackman window is defined as
.. math:: w(n) = 0.42 - 0.5 \cos(2\pi n/{M-1}) + 0.08 \cos(4\pi n/{M-1})
Most references to the Blackman window come from the signal processing
literature, where it is used as one of many windowing functions for
smoothing values. It is also known as an apodization (which means
"removing the foot", i.e. smoothing discontinuities at the beginning
and end of the sampled signal) or tapering function. It is known as a
"near optimal" tapering function, almost as good (by some measures)
as the kaiser window.
References
----------
Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra,
Dover Publications, New York.
Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing.
Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471.
Examples
--------
>>> np.blackman(12)
array([-1.4901161e-08, 3.2606423e-02, 1.5990365e-01, 4.1439798e-01,
7.3604530e-01, 9.6704686e-01, 9.6704674e-01, 7.3604506e-01,
4.1439781e-01, 1.5990359e-01, 3.2606363e-02, -1.4901161e-08])
Plot the window and its frequency response:
>>> import matplotlib.pyplot as plt
>>> window = np.blackman(51)
>>> plt.plot(window.asnumpy())
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("blackman window")
Text(0.5, 1.0, 'blackman window')
>>> plt.ylabel("Amplitude")
Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
Text(0.5, 0, 'Sample')
>>> plt.show()
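A similar sketch for the three-term definition in the Notes (illustrative
only; the tolerance absorbs float32 rounding):
>>> n = np.arange(12)
>>> w = 0.42 - 0.5 * np.cos(2 * np.pi * n / 11) + 0.08 * np.cos(4 * np.pi * n / 11)
>>> bool(np.all(np.abs(w - np.blackman(12)) < 1e-5))  # doctest: +SKIP
True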
"""
return _mx_nd_np.blackman(M, dtype=dtype, ctx=ctx)
@set_module('mxnet.numpy')
def flip(m, axis=None, out=None):
r"""
flip(m, axis=None, out=None)
Reverse the order of elements in an array along the given axis.
The shape of the array is preserved, but the elements are reordered.
Parameters
----------
m : ndarray or scalar
Input array.
axis : None or int or tuple of ints, optional
Axis or axes along which to flip over. The default,
axis=None, will flip over all of the axes of the input array.
If axis is negative it counts from the last to the first axis.
If axis is a tuple of ints, flipping is performed on all of the axes
specified in the tuple.
out : ndarray or scalar, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
out : ndarray or scalar
A view of `m` with the entries of axis reversed. Since a view is
returned, this operation is done in constant time.
Examples
--------
>>> A = np.arange(8).reshape((2,2,2))
>>> A
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
>>> np.flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
[4, 5]]])
>>> np.flip(A)
array([[[7, 6],
[5, 4]],
[[3, 2],
[1, 0]]])
>>> np.flip(A, (0, 2))
array([[[5, 4],
[7, 6]],
[[1, 0],
[3, 2]]])
"""
return _mx_nd_np.flip(m, axis, out=out)
@set_module('mxnet.numpy')
def flipud(m):
r"""
flipud(*args, **kwargs)
Flip array in the up/down direction.
Flip the entries in each column in the up/down direction.
Rows are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array.
Returns
-------
out : array_like
A view of `m` with the rows reversed. Since a view is
returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
fliplr : Flip array in the left/right direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[::-1,...]``.
Does not require the array to be two-dimensional.
Examples
--------
>>> A = np.diag(np.array([1.0, 2, 3]))
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.flipud(A)
array([[0., 0., 3.],
[0., 2., 0.],
[1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
array(True)
>>> np.flipud(np.array([1,2]))
array([2., 1.])
"""
return flip(m, 0)
@set_module('mxnet.numpy')
def fliplr(m):
r"""
fliplr(*args, **kwargs)
Flip array in the left/right direction.
Flip the entries in each row in the left/right direction.
Columns are preserved, but appear in a different order than before.
Parameters
----------
m : array_like
Input array, must be at least 2-D.
Returns
-------
f : ndarray
A view of `m` with the columns reversed. Since a view
is returned, this operation is :math:`\mathcal O(1)`.
See Also
--------
flipud : Flip array in the up/down direction.
rot90 : Rotate array counterclockwise.
Notes
-----
Equivalent to ``m[:,::-1]``. Requires the array to be at least 2-D.
Examples
--------
>>> A = np.diag([1.,2.,3.])
>>> A
array([[1., 0., 0.],
[0., 2., 0.],
[0., 0., 3.]])
>>> np.fliplr(A)
array([[0., 0., 1.],
[0., 2., 0.],
[3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
array(True)
"""
return flip(m, 1)
@set_module('mxnet.numpy')
def around(x, decimals=0, out=None, **kwargs):
r"""
around(x, decimals=0, out=None)
Evenly round to the given number of decimals.
Parameters
----------
x : ndarray or scalar
Input data.
decimals : int, optional
Number of decimal places to round to (default: 0). If
decimals is negative, it specifies the number of positions to
the left of the decimal point.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape and type as the expected output.
Returns
-------
rounded_array : ndarray or scalar
An array of the same type as `x`, containing the rounded values.
A reference to the result is returned.
.. note::
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
-0.5 and 0.5 round to 0.0, etc.
This function differs from the original numpy.around in the following aspects:
* Cannot cast type automatically. The dtype of `out` must be the same as the expected one.
* Does not support complex-valued inputs.
Examples
--------
>>> np.around([0.37, 1.64])
array([ 0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
array([ 0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
array([ 0., 2., 2., 4., 4.])
>>> np.around([1, 2, 3, 11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1, 2, 3, 11], decimals=-1)
array([ 0, 0, 0, 10])
"""
return _mx_nd_np.around(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
def round(x, decimals=0, out=None, **kwargs):
r"""
round(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return _mx_nd_np.round(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
def round_(x, decimals=0, out=None, **kwargs):
r"""
round_(a, decimals=0, out=None)
Round an array to the given number of decimals.
See Also
--------
around : equivalent function; see for details.
"""
return _mx_nd_np.round_(x, decimals, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def arctan2(x1, x2, out=None, **kwargs):
r"""
Element-wise arc tangent of ``x1/x2`` choosing the quadrant correctly.
The quadrant (i.e., branch) is chosen so that ``arctan2(x1, x2)`` is
the signed angle in radians between the ray ending at the origin and
passing through the point (1,0), and the ray ending at the origin and
passing through the point (`x2`, `x1`). (Note the role reversal: the
"`y`-coordinate" is the first function parameter, the "`x`-coordinate"
is the second.) By IEEE convention, this function is defined for
`x2` = +/-0 and for either or both of `x1` and `x2` = +/-inf (see
Notes for specific values).
This function is not defined for complex-valued arguments; for the
so-called argument of complex values, use `angle`.
Parameters
----------
x1 : ndarray or scalar
`y`-coordinates.
x2 : ndarray or scalar
`x`-coordinates. `x2` must be broadcastable to match the shape of
`x1` or vice versa.
out : ndarray or None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Array of angles in radians, in the range ``[-pi, pi]``. This is a scalar if
`x1` and `x2` are scalars.
.. note::
*arctan2* is identical to the ``atan2`` function of the underlying
C library. The following special values are defined in the C
standard: [1]_
+--------+--------+------------------+
| `x1`   | `x2`   | `arctan2(x1,x2)` |
+========+========+==================+
| +/- 0  | +0     | +/- 0            |
+--------+--------+------------------+
| +/- 0  | -0     | +/- pi           |
+--------+--------+------------------+
| > 0    | +/-inf | +0 / +pi         |
+--------+--------+------------------+
| < 0    | +/-inf | -0 / -pi         |
+--------+--------+------------------+
| +/-inf | +inf   | +/- (pi/4)       |
+--------+--------+------------------+
| +/-inf | -inf   | +/- (3*pi/4)     |
+--------+--------+------------------+
Note that +0 and -0 are distinct floating point numbers, as are +inf
and -inf.
This function differs from the original numpy.arctan2 in the following aspects:
* Only supports float16, float32 and float64.
References
----------
.. [1] ISO/IEC standard 9899:1999, "Programming language C."
Examples
--------
Consider four points in different quadrants:
>>> x = np.array([-1, +1, +1, -1])
>>> y = np.array([-1, -1, +1, +1])
>>> np.arctan2(y, x) * 180 / np.pi
array([-135., -45., 45., 135.])
Note the order of the parameters. `arctan2` is defined also when `x2` = 0
and at several other special points, obtaining values in
the range ``[-pi, pi]``:
>>> x = np.array([1, -1])
>>> y = np.array([0, 0])
>>> np.arctan2(x, y)
array([ 1.5707964, -1.5707964])
"""
return _mx_nd_np.arctan2(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def hypot(x1, x2, out=None, **kwargs):
r"""
Given the "legs" of a right triangle, return its hypotenuse.
Equivalent to ``sqrt(x1**2 + x2**2)``, element-wise. If `x1` or
`x2` is scalar_like (i.e., unambiguously cast-able to a scalar type),
it is broadcast for use with each element of the other argument.
Parameters
----------
x1, x2 : array_like
Leg of the triangle(s).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
z : ndarray
The hypotenuse of the triangle(s).
This is a scalar if both `x1` and `x2` are scalars.
.. note::
This function differs from the original numpy.hypot in the following aspects:
* Only supports float16, float32 and float64.
Examples
--------
>>> np.hypot(3*np.ones((3, 3)), 4*np.ones((3, 3)))
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
Example showing broadcast of scalar_like argument:
>>> np.hypot(3*np.ones((3, 3)), [4])
array([[ 5., 5., 5.],
[ 5., 5., 5.],
[ 5., 5., 5.]])
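A quick sketch of the ``sqrt(x1**2 + x2**2)`` equivalence stated above
(illustrative only):
>>> a, b = np.array([3., 5., 8.]), np.array([4., 12., 15.])
>>> bool(np.all(np.abs(np.hypot(a, b) - np.sqrt(a**2 + b**2)) < 1e-5))  # doctest: +SKIP
True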
"""
return _mx_nd_np.hypot(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_and(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise AND of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_and(13, 17)
1
>>> np.bitwise_and(14, 13)
12
>>> np.bitwise_and(np.array([14,3], dtype='int32'), 13)
array([12,  1], dtype=int32)
>>> np.bitwise_and(np.array([11,7], dtype='int32'), np.array([4,25], dtype='int32'))
array([0, 1], dtype=int32)
>>> np.bitwise_and(np.array([2,5,255], dtype='int32'), np.array([3,14,16], dtype='int32'))
array([ 2, 4, 16], dtype=int32)
>>> np.bitwise_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _mx_nd_np.bitwise_and(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_xor(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise XOR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_xor(13, 17)
28
>>> np.bitwise_xor(31, 5)
26
>>> np.bitwise_xor(np.array([31,3], dtype=np.int32), 5)
array([26, 6], dtype=int32)
>>> np.bitwise_xor(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([26, 5], dtype=int32)
>>> np.bitwise_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _mx_nd_np.bitwise_xor(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def bitwise_or(x1, x2, out=None, **kwargs):
r"""
Compute the bit-wise OR of two arrays element-wise.
Parameters
----------
x1, x2 : ndarray or scalar
Only integer and boolean types are handled. If x1.shape != x2.shape,
they must be broadcastable to a common shape (which becomes the shape of the output).
out : ndarray, optional
A location into which the result is stored. If provided, it must have a shape that the
inputs broadcast to. If not provided or None, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Result.
Examples
--------
>>> np.bitwise_or(13, 17)
29
>>> np.bitwise_or(31, 5)
31
>>> np.bitwise_or(np.array([31,3], dtype=np.int32), 5)
array([31, 7])
>>> np.bitwise_or(np.array([31,3], dtype='int32'), np.array([5,6], dtype='int32'))
array([31, 7])
>>> np.bitwise_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, True])
"""
return _mx_nd_np.bitwise_or(x1, x2, out=out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def ldexp(x1, x2, out=None, **kwargs):
"""
Returns x1 * 2**x2, element-wise.
The mantissas `x1` and twos exponents `x2` are used to construct
floating point numbers ``x1 * 2**x2``.
Parameters
----------
x1 : ndarray or scalar
Array of multipliers.
x2 : ndarray or scalar, int
Array of twos exponents.
out : ndarray, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not, a freshly-allocated array is returned.
Returns
-------
y : ndarray or scalar
The result of ``x1 * 2**x2``.
This is a scalar if both `x1` and `x2` are scalars.
Notes
-----
Complex dtypes are not supported; they will raise a TypeError.
Unlike numpy, x2 is allowed to be a float as well as an int.
`ldexp` is useful as the inverse of `frexp`, if used by itself it is
more clear to simply use the expression ``x1 * 2**x2``.
Examples
--------
>>> np.ldexp(5, np.arange(4))
array([ 5., 10., 20., 40.])
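A sketch of the ``x1 * 2**x2`` equivalence noted above (illustrative only):
>>> x1, x2 = np.array([5., 5., 5.]), np.array([0., 1., 3.])
>>> bool(np.all(np.ldexp(x1, x2) == x1 * 2 ** x2))  # doctest: +SKIP
True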
"""
return _mx_nd_np.ldexp(x1, x2, out)
@set_module('mxnet.numpy')
def vdot(a, b):
r"""
Return the dot product of two vectors.
Note that `vdot` handles multidimensional arrays differently than `dot`:
it does *not* perform a matrix product, but flattens input arguments
to 1-D vectors first. Consequently, it should only be used for vectors.
Parameters
----------
a : ndarray
First argument to the dot product.
b : ndarray
Second argument to the dot product.
Returns
-------
output : ndarray
Dot product of `a` and `b`.
See Also
--------
dot : Return the dot product without using the complex conjugate of the
first argument.
Examples
--------
Note that higher-dimensional arrays are flattened!
>>> a = np.array([[1, 4], [5, 6]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.vdot(a, b)
array(30.)
>>> np.vdot(b, a)
array(30.)
>>> 1*4 + 4*1 + 5*2 + 6*2
30
"""
return tensordot(a.flatten(), b.flatten(), 1)
@set_module('mxnet.numpy')
def inner(a, b):
r"""Inner product of two arrays.
Ordinary inner product of vectors for 1-D arrays (without complex
conjugation), in higher dimensions a sum product over the last axes.
Parameters
----------
a, b : ndarray
If `a` and `b` are nonscalar, their last dimensions must match.
Returns
-------
out : ndarray
`out.shape = a.shape[:-1] + b.shape[:-1]`
Raises
------
ValueError
If the last dimension of `a` and `b` has different size.
See Also
--------
tensordot : Sum products over arbitrary axes.
dot : Generalised matrix product, using second last dimension of `b`.
einsum : Einstein summation convention.
.. note::
For vectors (1-D arrays) it computes the ordinary inner-product::
np.inner(a, b) = sum(a[:]*b[:])
More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
or explicitly::
np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
= sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
In addition `a` or `b` may be scalars, in which case::
np.inner(a,b) = a*b
Examples
--------
Ordinary inner product for vectors:
>>> a = np.array([1,2,3])
>>> b = np.array([0,1,0])
>>> np.inner(a, b)
array(2.)
A multidimensional example:
>>> a = np.arange(24).reshape((2,3,4))
>>> b = np.arange(4)
>>> np.inner(a, b)
array([[ 14., 38., 62.],
[ 86., 110., 134.]])
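A sketch of the ``tensordot`` identity quoted in the note above
(illustrative only):
>>> a2 = np.arange(24).reshape((2, 3, 4))
>>> b2 = np.arange(4)
>>> bool(np.all(np.inner(a2, b2) == np.tensordot(a2, b2, axes=(-1, -1))))  # doctest: +SKIP
True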
"""
return tensordot(a, b, [-1, -1])
@set_module('mxnet.numpy')
def outer(a, b):
r"""Compute the outer product of two vectors.
Given two vectors, ``a = [a0, a1, ..., aM]`` and
``b = [b0, b1, ..., bN]``,
the outer product [1]_ is::
[[a0*b0 a0*b1 ... a0*bN ]
[a1*b0 .
[ ... .
[aM*b0 aM*bN ]]
Parameters
----------
a : (M,) ndarray
First input vector. Input is flattened if
not already 1-dimensional.
b : (N,) ndarray
Second input vector. Input is flattened if
not already 1-dimensional.
Returns
-------
out : (M, N) ndarray
``out[i, j] = a[i] * b[j]``
See also
--------
inner
einsum : ``einsum('i,j->ij', a.ravel(), b.ravel())`` is the equivalent.
ufunc.outer : A generalization to N dimensions and other operations.
``np.multiply.outer(a.ravel(), b.ravel())`` is the equivalent.
References
----------
.. [1] : G. H. Golub and C. F. Van Loan, *Matrix Computations*, 3rd
ed., Baltimore, MD, Johns Hopkins University Press, 1996,
pg. 8.
Examples
--------
Make a (*very* coarse) grid for computing a Mandelbrot set:
>>> rl = np.outer(np.ones((5,)), np.linspace(-2, 2, 5))
>>> rl
array([[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.],
[-2., -1., 0., 1., 2.]])
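A minimal sketch of ``out[i, j] = a[i] * b[j]`` (illustrative only):
>>> np.outer(np.array([1., 2., 3.]), np.array([10., 20.]))  # doctest: +SKIP
array([[10., 20.],
       [20., 40.],
       [30., 60.]])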
"""
return tensordot(a.flatten(), b.flatten(), 0)
@set_module('mxnet.numpy')
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None): # pylint: disable=too-many-arguments
"""
Return the cross product of two (arrays of) vectors.
The cross product of `a` and `b` in :math:`R^3` is a vector perpendicular
to both `a` and `b`. If `a` and `b` are arrays of vectors, the vectors
are defined by the last axis of `a` and `b` by default, and these axes
can have dimensions 2 or 3. Where the dimension of either `a` or `b` is
2, the third component of the input vector is assumed to be zero and the
cross product calculated accordingly. In cases where both input vectors
have dimension 2, the z-component of the cross product is returned.
Parameters
----------
a : ndarray
Components of the first vector(s).
b : ndarray
Components of the second vector(s).
axisa : int, optional
Axis of `a` that defines the vector(s). By default, the last axis.
axisb : int, optional
Axis of `b` that defines the vector(s). By default, the last axis.
axisc : int, optional
Axis of `c` containing the cross product vector(s). Ignored if
both input vectors have dimension 2, as the return is scalar.
By default, the last axis.
axis : int, optional
If defined, the axis of `a`, `b` and `c` that defines the vector(s)
and cross product(s). Overrides `axisa`, `axisb` and `axisc`.
Returns
-------
c : ndarray
Vector cross product(s).
Raises
------
ValueError
When the dimension of the vector(s) in `a` and/or `b` does not
equal 2 or 3.
Notes
-----
Supports full broadcasting of the inputs.
Examples
--------
Vector cross-product.
>>> x = np.array([1., 2., 3.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([-3., 6., -3.])
One vector with dimension 2.
>>> x = np.array([1., 2.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([12., -6., -3.])
Equivalently:
>>> x = np.array([1., 2., 0.])
>>> y = np.array([4., 5., 6.])
>>> np.cross(x, y)
array([12., -6., -3.])
Both vectors with dimension 2.
>>> x = np.array([1., 2.])
>>> y = np.array([4., 5.])
>>> np.cross(x, y)
array(-3.)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
>>> x = np.array([[1., 2., 3.], [4., 5., 6.]])
>>> y = np.array([[4., 5., 6.], [1., 2., 3.]])
>>> np.cross(x, y)
array([[-3., 6., -3.],
[ 3., -6., 3.]])
The orientation of `c` can be changed using the `axisc` keyword.
>>> np.cross(x, y, axisc=0)
array([[-3., 3.],
[ 6., -6.],
[-3., 3.]])
Change the vector definition of `x` and `y` using `axisa` and `axisb`.
>>> x = np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]])
>>> y = np.array([[7., 8., 9.], [4., 5., 6.], [1., 2., 3.]])
>>> np.cross(x, y)
array([[ -6., 12., -6.],
[ 0., 0., 0.],
[ 6., -12., 6.]])
>>> np.cross(x, y, axisa=0, axisb=0)
array([[-24., 48., -24.],
[-30., 60., -30.],
[-36., 72., -36.]])
"""
return _mx_nd_np.cross(a, b, axisa=axisa, axisb=axisb, axisc=axisc, axis=axis)
@set_module('mxnet.numpy')
def kron(a, b):
r"""Kronecker product of two arrays.
Computes the Kronecker product, a composite array made of blocks of the
second array scaled by the first.
Parameters
----------
a, b : ndarray
Returns
-------
out : ndarray
See Also
--------
outer : The outer product
.. note::
The function assumes that the number of dimensions of `a` and `b`
are the same, if necessary prepending the smallest with ones.
If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
The elements are products of elements from `a` and `b`, organized
explicitly by::
kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]
where::
kt = it * st + jt, t = 0,...,N
In the common 2-D case (N=1), the block structure can be visualized::
[[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
[ ... ... ],
[ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
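A 2-D sketch of the block structure described in the note above
(illustrative only):
>>> np.kron(np.array([[1., 0.], [0., 2.]]), np.array([[1., 2.], [3., 4.]]))  # doctest: +SKIP
array([[1., 2., 0., 0.],
       [3., 4., 0., 0.],
       [0., 0., 2., 4.],
       [0., 0., 6., 8.]])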
"""
return _mx_nd_np.kron(a, b)
@set_module('mxnet.numpy')
def equal(x1, x2, out=None):
"""
Return (x1 == x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
not_equal, greater_equal, less_equal, greater, less
Examples
--------
>>> np.equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.equal(1, np.ones(1))
array([ True])
"""
return _mx_nd_np.equal(x1, x2, out)
@set_module('mxnet.numpy')
def not_equal(x1, x2, out=None):
"""
Return (x1 != x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.not_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.not_equal(1, np.ones(1))
array([False])
"""
return _mx_nd_np.not_equal(x1, x2, out)
@set_module('mxnet.numpy')
def greater(x1, x2, out=None):
"""
Return the truth value of (x1 > x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.greater(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater(1, np.ones(1))
array([False])
"""
return _mx_nd_np.greater(x1, x2, out)
@set_module('mxnet.numpy')
def less(x1, x2, out=None):
"""
Return the truth value of (x1 < x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.less(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
       [False, False, False]])
>>> np.less(1, np.ones(1))
array([False])
"""
return _mx_nd_np.less(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_and(x1, x2, out=None):
r"""
Compute the truth value of x1 AND x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical AND is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical AND operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_or, logical_not, logical_xor, bitwise_or
Examples
--------
>>> np.logical_and(True, False)
False
>>> np.logical_and(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([False, True])
"""
return _mx_nd_np.logical_and(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_or(x1, x2, out=None):
r"""
Compute the truth value of x1 OR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical OR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_and, logical_not, logical_xor, bitwise_or
Examples
--------
>>> np.logical_or(True, False)
True
>>> np.logical_or(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([True, True])
"""
return _mx_nd_np.logical_or(x1, x2, out)
@set_module('mxnet.numpy')
@wrap_np_binary_func
def logical_xor(x1, x2, out=None):
r"""
Compute the truth value of x1 XOR x2 element-wise.
Parameters
----------
x1, x2 : array_like
Logical XOR is applied to the elements of `x1` and `x2`.
If ``x1.shape != x2.shape``, they must be broadcastable to a common
shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
Returns
-------
y : ndarray or bool
Boolean result of the logical XOR operation applied to the elements
of `x1` and `x2`; the shape is determined by broadcasting.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
logical_and, logical_not, logical_or, bitwise_or
Examples
--------
>>> np.logical_xor(True, False)
True
>>> np.logical_xor(np.array([True, True], dtype='bool'), np.array([False, True], dtype='bool'))
array([ True, False])
"""
return _mx_nd_np.logical_xor(x1, x2, out)
@set_module('mxnet.numpy')
def greater_equal(x1, x2, out=None):
"""
Return the truth value of (x1 >= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.greater_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[ True, True, True],
[ True, True, True]])
>>> np.greater_equal(1, np.ones(1))
array([True])
"""
return _mx_nd_np.greater_equal(x1, x2, out)
@set_module('mxnet.numpy')
def less_equal(x1, x2, out=None):
"""
Return the truth value of (x1 <= x2) element-wise.
Parameters
----------
x1, x2 : ndarrays or scalars
Input arrays. If ``x1.shape != x2.shape``, they must be broadcastable to
a common shape (which becomes the shape of the output).
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If provided, it must have
a shape that the inputs broadcast to. If not provided or `None`,
a freshly-allocated array is returned.
Returns
-------
out : ndarray or scalar
Output array of type bool, element-wise comparison of `x1` and `x2`.
This is a scalar if both `x1` and `x2` are scalars.
See Also
--------
equal, greater, greater_equal, less, less_equal
Examples
--------
>>> np.less_equal(np.ones((2, 1)), np.zeros((1, 3)))
array([[False, False, False],
[False, False, False]])
>>> np.less_equal(1, np.ones(1))
array([True])
"""
return _mx_nd_np.less_equal(x1, x2, out)
@set_module('mxnet.numpy')
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
Elements that roll beyond the last position are re-introduced at
the first.
Parameters
----------
a : ndarray
Input array.
shift : int or tuple of ints
The number of places by which elements are shifted. If a tuple,
then `axis` must be a tuple of the same size, and each of the
given axes is shifted by the corresponding number. If an int
while `axis` is a tuple of ints, then the same value is used for
all given axes.
axis : int or tuple of ints, optional
Axis or axes along which elements are shifted. By default, the
array is flattened before shifting, after which the original
shape is restored.
Returns
-------
res : ndarray
Output array, with the same shape as `a`.
Notes
-----
Supports rolling over multiple dimensions simultaneously.
Examples
--------
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8., 9., 0., 1., 2., 3., 4., 5., 6., 7.])
>>> np.roll(x, -2)
array([2., 3., 4., 5., 6., 7., 8., 9., 0., 1.])
>>> x2 = np.reshape(x, (2,5))
>>> x2
array([[0., 1., 2., 3., 4.],
[5., 6., 7., 8., 9.]])
>>> np.roll(x2, 1)
array([[9., 0., 1., 2., 3.],
[4., 5., 6., 7., 8.]])
>>> np.roll(x2, -1)
array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 0.]])
>>> np.roll(x2, 1, axis=0)
array([[5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.]])
>>> np.roll(x2, -1, axis=0)
array([[5., 6., 7., 8., 9.],
[0., 1., 2., 3., 4.]])
>>> np.roll(x2, 1, axis=1)
array([[4., 0., 1., 2., 3.],
[9., 5., 6., 7., 8.]])
>>> np.roll(x2, -1, axis=1)
array([[1., 2., 3., 4., 0.],
[6., 7., 8., 9., 5.]])
"""
return _mx_nd_np.roll(a, shift, axis=axis)
@set_module('mxnet.numpy')
def rot90(m, k=1, axes=(0, 1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
Rotation direction is from the first towards the second axis.
Parameters
----------
m : ndarray
Array of two or more dimensions.
k : integer
Number of times the array is rotated by 90 degrees.
axes: (2,) array_like
The array is rotated in the plane defined by the axes.
Axes must be different.
Returns
-------
y : ndarray
A rotated view of `m`.
Notes
-----
rot90(m, k=1, axes=(1,0)) is the reverse of rot90(m, k=1, axes=(0,1))
rot90(m, k=1, axes=(1,0)) is equivalent to rot90(m, k=-1, axes=(0,1))
Examples
--------
>>> m = np.array([[1,2],[3,4]], 'int')
>>> m
array([[1, 2],
[3, 4]], dtype=int64)
>>> np.rot90(m)
array([[2, 4],
[1, 3]], dtype=int64)
>>> np.rot90(m, 2)
array([[4, 3],
[2, 1]], dtype=int64)
>>> m = np.arange(8).reshape((2,2,2))
>>> np.rot90(m, 1, (1,2))
array([[[1., 3.],
[0., 2.]],
[[5., 7.],
[4., 6.]]])
"""
return _mx_nd_np.rot90(m, k=k, axes=axes)
@set_module('mxnet.numpy')
def hsplit(ary, indices_or_sections):
"""Split an array into multiple sub-arrays horizontally (column-wise).
This is equivalent to ``split`` with ``axis=0`` if ``ary`` has one
dimension, and with ``axis=1`` otherwise.
Parameters
----------
ary : ndarray
Array to be divided into sub-arrays.
indices_or_sections : int, list of ints or tuple of ints.
If `indices_or_sections` is an integer, N, the array will be divided
into N equal arrays along `axis`. If such a split is not possible,
an error is raised.
If `indices_or_sections` is a list of sorted integers, the entries
indicate where along `axis` the array is split.
If an index exceeds the dimension of the array along `axis`,
an error is raised, so each index must be less than or equal to
the dimension of the array along `axis`.
Returns
-------
sub-arrays : list of ndarrays
A list of sub-arrays.
.. note::
* If `indices_or_sections` is given as an integer but a split
does not result in equal division, a ValueError is raised.
* If `indices_or_sections` is the integer 1, an error is raised,
because a single output from split is not supported yet.
See Also
--------
split : Split an array into multiple sub-arrays of equal size.
Examples
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
array([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.],
[12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
[12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
[10., 11.],
[14., 15.]])]
>>> np.hsplit(x, [3, 6])
[array([[ 0., 1., 2.],
[ 4., 5., 6.],
[ 8., 9., 10.],
[12., 13., 14.]]),
array([[ 3.],
[ 7.],
[11.],
[15.]]),
array([], shape=(4, 0), dtype=float32)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
array([[[ 0., 1.],
[ 2., 3.]],
[[ 4., 5.],
[ 6., 7.]]])
>>> np.hsplit(x, 2)
[array([[[ 0., 1.]],
[[ 4., 5.]]]),
array([[[ 2., 3.]],
[[ 6., 7.]]])]
If ``ary`` has one dimension, the split is along ``axis=0``.
>>> x = np.arange(4)
array([0., 1., 2., 3.])
>>> np.hsplit(x, 2)
[array([0., 1.]), array([2., 3.])]
Passing a repeated index produces an empty sub-array:
>>> np.hsplit(x, [2, 2])
[array([0., 1.]), array([], dtype=float32), array([2., 3.])]
"""
return _mx_nd_np.hsplit(ary, indices_or_sections)
@set_module('mxnet.numpy')
def einsum(*operands, **kwargs):
r"""
einsum(subscripts, *operands, out=None, optimize=False)
Evaluates the Einstein summation convention on the operands.
Using the Einstein summation convention, many common multi-dimensional,
linear algebraic array operations can be represented in a simple fashion.
In *implicit* mode `einsum` computes these values.
In *explicit* mode, `einsum` provides further flexibility to compute
other array operations that might not be considered classical Einstein
summation operations, by disabling, or forcing summation over specified
subscript labels.
See the notes and examples for clarification.
Parameters
----------
subscripts : str
Specifies the subscripts for summation as comma separated list of
subscript labels. An implicit (classical Einstein summation)
calculation is performed unless the explicit indicator '->' is
included as well as subscript labels of the precise output form.
operands : list of ndarray
These are the arrays for the operation.
out : ndarray, optional
If provided, the calculation is done into this array.
optimize : {False, True}, optional
Controls if intermediate optimization should occur. No optimization
will occur if False. Defaults to False.
Returns
-------
output : ndarray
The calculation based on the Einstein summation convention.
Notes
-----
The Einstein summation convention can be used to compute
many multi-dimensional, linear algebraic array operations. `einsum`
provides a succinct way of representing these.
A non-exhaustive list of these operations,
which can be computed by `einsum`, is shown below along with examples:
* Trace of an array, :py:func:`np.trace`.
* Return a diagonal, :py:func:`np.diag`.
* Array axis summations, :py:func:`np.sum`.
* Transpositions and permutations, :py:func:`np.transpose`.
* Matrix multiplication and dot product, :py:func:`np.matmul` :py:func:`np.dot`.
* Vector inner and outer products, :py:func:`np.inner` :py:func:`np.outer`.
* Broadcasting, element-wise and scalar multiplication, :py:func:`np.multiply`.
* Tensor contractions, :py:func:`np.tensordot`.
The subscripts string is a comma-separated list of subscript labels,
where each label refers to a dimension of the corresponding operand.
Whenever a label is repeated it is summed, so ``np.einsum('i,i', a, b)``
is equivalent to :py:func:`np.inner(a,b) <np.inner>`. If a label
appears only once, it is not summed, so ``np.einsum('i', a)`` produces a
view of ``a`` with no changes. A further example ``np.einsum('ij,jk', a, b)``
describes traditional matrix multiplication and is equivalent to
:py:func:`np.matmul(a,b) <np.matmul>`. Repeated subscript labels in one
operand take the diagonal. For example, ``np.einsum('ii', a)`` is equivalent
to :py:func:`np.trace(a) <np.trace>`.
In *implicit mode*, the chosen subscripts are important
since the axes of the output are reordered alphabetically. This
means that ``np.einsum('ij', a)`` doesn't affect a 2D array, while
``np.einsum('ji', a)`` takes its transpose. Additionally,
``np.einsum('ij,jk', a, b)`` returns a matrix multiplication, while,
``np.einsum('ij,jh', a, b)`` returns the transpose of the
multiplication since subscript 'h' precedes subscript 'i'.
In *explicit mode* the output can be directly controlled by
specifying output subscript labels. This requires the
identifier '->' as well as the list of output subscript labels.
This feature increases the flexibility of the function since
summing can be disabled or forced when required. The call
``np.einsum('i->', a)`` is like :py:func:`np.sum(a, axis=-1) <np.sum>`,
and ``np.einsum('ii->i', a)`` is like :py:func:`np.diag(a) <np.diag>`.
The difference is that `einsum` does not allow broadcasting by default.
Additionally ``np.einsum('ij,jh->ih', a, b)`` directly specifies the
order of the output subscript labels and therefore returns matrix
multiplication, unlike the example above in implicit mode.
To enable and control broadcasting, use an ellipsis. Default
NumPy-style broadcasting is done by adding an ellipsis
to the left of each term, like ``np.einsum('...ii->...i', a)``.
To take the trace along the first and last axes,
you can do ``np.einsum('i...i', a)``, or to do a matrix-matrix
product with the left-most indices instead of rightmost, one can do
``np.einsum('ij...,jk...->ik...', a, b)``.
When there is only one operand, no axes are summed, and no output
parameter is provided, a view into the operand is returned instead
of a new array. Thus, taking the diagonal as ``np.einsum('ii->i', a)``
produces a view.
The ``optimize`` argument which will optimize the contraction order
of an einsum expression. For a contraction with three or more operands this
can greatly increase the computational efficiency at the cost of a larger
memory footprint during computation.
Typically a 'greedy' algorithm is applied which empirical tests have shown
returns the optimal path in the majority of cases. 'optimal' is not supported
for now.
.. note::
This function differs from the original `numpy.einsum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html>`_ in
the following way(s):
* Does not support 'optimal' strategy
* Does not support the alternative subscript like
`einsum(op0, sublist0, op1, sublist1, ..., [sublistout])`
* Does not produce view in any cases
Examples
--------
>>> a = np.arange(25).reshape(5,5)
>>> b = np.arange(5)
>>> c = np.arange(6).reshape(2,3)
Trace of a matrix:
>>> np.einsum('ii', a)
array(60.)
Extract the diagonal (requires explicit form):
>>> np.einsum('ii->i', a)
array([ 0., 6., 12., 18., 24.])
Sum over an axis (requires explicit form):
>>> np.einsum('ij->i', a)
array([ 10., 35., 60., 85., 110.])
>>> np.sum(a, axis=1)
array([ 10., 35., 60., 85., 110.])
For higher dimensional arrays summing a single axis can be done with ellipsis:
>>> np.einsum('...j->...', a)
array([ 10., 35., 60., 85., 110.])
Compute a matrix transpose, or reorder any number of axes:
>>> np.einsum('ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.einsum('ij->ji', c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
>>> np.transpose(c)
array([[0., 3.],
[1., 4.],
[2., 5.]])
Vector inner products:
>>> np.einsum('i,i', b, b)
array(30.)
Matrix vector multiplication:
>>> np.einsum('ij,j', a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.dot(a, b)
array([ 30., 80., 130., 180., 230.])
>>> np.einsum('...j,j', a, b)
array([ 30., 80., 130., 180., 230.])
Broadcasting and scalar multiplication:
>>> np.einsum('..., ...', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.einsum(',ij', np.array(3), c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
>>> np.multiply(3, c)
array([[ 0., 3., 6.],
[ 9., 12., 15.]])
Vector outer product:
>>> np.einsum('i,j', np.arange(2)+1, b)
array([[0., 1., 2., 3., 4.],
[0., 2., 4., 6., 8.]])
Tensor contraction:
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
array([[4400., 4730.],
[4532., 4874.],
[4664., 5018.],
[4796., 5162.],
[4928., 5306.]])
Example of ellipsis use:
>>> a = np.arange(6).reshape((3,2))
>>> b = np.arange(12).reshape((4,3))
>>> np.einsum('ki,jk->ij', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('ki,...k->i...', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
>>> np.einsum('k...,jk', a, b)
array([[10., 28., 46., 64.],
[13., 40., 67., 94.]])
Chained array operations. For more complicated contractions, speed ups
might be achieved by repeatedly computing a 'greedy' path. Performance
improvements can be particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
# Basic `einsum`: ~42.22ms (benchmarked on 3.4GHz Intel Xeon.)
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
# Greedy `einsum` (faster optimal path approximation): ~0.117ms
>>> for iteration in range(500):
... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=True)
"""
return _mx_nd_np.einsum(*operands, **kwargs)
@set_module('mxnet.numpy')
def insert(arr, obj, values, axis=None):
r"""Insert values along the given axis before the given indices.
Parameters
----------
arr : ndarray
Input array.
obj : int, slice or ndarray of int64
Object that defines the index or indices before which `values` is
inserted.
Support for multiple insertions when `obj` is a single scalar or a
sequence with one element (only support int32 and int64 element).
values : ndarray
Values to insert into `arr`.
If the type of values is different from that of arr, values is converted
to the type of arr.
axis : int, optional
Axis along which to insert `values`. If `axis` is None then `arr`
is flattened first.
Returns
-------
out : ndarray
A copy of `arr` with `values` inserted. Note that `insert`
does not occur in-place: a new array is returned. If
`axis` is None, `out` is a flattened array.
.. note::
* Note that for higher dimensional inserts `obj=0` behaves very differently
from `obj=[0]`, just like `arr[:,0,:] = values` is different from
`arr[:,[0],:] = values`.
* If `obj` is an ndarray, its dtype must be int64.
Examples
--------
>>> a = np.array([[1, 1], [2, 2], [3, 3]])
>>> a
array([[1., 1.],
[2., 2.],
[3., 3.]])
>>> np.insert(a, 1, np.array(5))
array([1., 5., 1., 2., 2., 3., 3.])
>>> np.insert(a, 1, np.array(5), axis=1)
array([[1., 5., 1.],
[2., 5., 2.],
[3., 5., 3.]])
Difference between sequence and scalars:
>>> np.insert(a, np.array([1], dtype=np.int64), np.array([[1],[2],[3]]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> np.insert(a, 1, np.array([1, 2, 3]), axis=1)
array([[1., 1., 1.],
[2., 2., 2.],
[3., 3., 3.]])
>>> b = a.flatten()
>>> b
array([1., 1., 2., 2., 3., 3.])
>>> np.insert(b, np.array([2, 2], dtype=np.int64), np.array([5, 6]))
array([1., 1., 5., 6., 2., 2., 3., 3.])
>>> np.insert(b, slice(2, 4), np.array([5, 6]))
array([1., 1., 5., 2., 6., 2., 3., 3.])
# type casting
>>> np.insert(b.astype(np.int32), np.array([2, 2],dtype='int64'), np.array([7.13, False]))
array([1, 1, 7, 0, 2, 2, 3, 3], dtype=int32)
>>> x = np.arange(8).reshape(2, 4)
>>> idx = np.array([1, 3], dtype=np.int64)
>>> np.insert(x, idx, np.array([999]), axis=1)
array([[ 0., 999., 1., 2., 999., 3.],
[ 4., 999., 5., 6., 999., 7.]])
"""
return _mx_nd_np.insert(arr, obj, values, axis=axis)
@set_module('mxnet.numpy')
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always returned in
row-major, C-style order.
To group the indices by element, rather than dimension, use `argwhere`,
which returns a row for each non-zero element.
Parameters
----------
a : ndarray
Input array.
Returns
-------
tuple_of_arrays : tuple
Indices of elements that are non-zero.
See Also
--------
ndarray.nonzero :
Equivalent ndarray method.
Notes
-----
While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
will correctly handle 0-d arrays.
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
>>> x
array([[3, 0, 0],
[0, 4, 0],
[5, 6, 0]], dtype=int32)
>>> np.nonzero(x)
(array([0, 1, 2, 2], dtype=int64), array([0, 1, 0, 1], dtype=int64))
>>> x[np.nonzero(x)]
array([3, 4, 5, 6])
>>> np.transpose(np.stack(np.nonzero(x)))
array([[0, 0],
[1, 1],
[2, 0],
[2, 1]], dtype=int64)
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
boolean array and since False is interpreted as 0, np.nonzero(a > 3)
yields the indices of the `a` where the condition is true.
>>> a = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
>>> a > 3
array([[False, False, False],
[ True, True, True],
[ True, True, True]])
>>> np.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
Using this result to index `a` is equivalent to using the mask directly:
>>> a[np.nonzero(a > 3)]
array([4, 5, 6, 7, 8, 9], dtype=int32)
>>> a[a > 3]
array([4, 5, 6, 7, 8, 9], dtype=int32)
``nonzero`` can also be called as a method of the array.
>>> (a > 3).nonzero()
(array([1, 1, 1, 2, 2, 2], dtype=int64), array([0, 1, 2, 0, 1, 2], dtype=int64))
"""
return _mx_nd_np.nonzero(a)
@set_module('mxnet.numpy')
def percentile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""
Compute the q-th percentile of the data along the specified axis.
Returns the q-th percentile(s) of the array elements.
Parameters
----------
a : array_like
Input array
q : array_like
Percentile or sequence of percentiles to compute.
axis : {int, tuple of int, None}, optional
Axis or axes along which the percentiles are computed. The default is to
compute the percentile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must have the same
shape and buffer length as the expected output, but the type (of the output)
will be cast if necessary.
overwrite_input : bool, optional (Not supported yet)
If True, then allow the input array a to be modified by intermediate calculations,
to save memory. In this case, the contents of the input a after this function
completes is undefined.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use when the
desired percentile lies between two data points i < j:
'linear': i + (j - i) * fraction, where fraction is the fractional part of the
index surrounded by i and j.
'lower': i.
'higher': j.
'nearest': i or j, whichever is nearest.
'midpoint': (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the original array a.
Returns
-------
percentile : scalar or ndarray
Output array.
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.percentile(a, np.array(50))
array(3.5)
>>> np.percentile(a, np.array(50), axis=0)
array([6.5, 4.5, 2.5])
>>> np.percentile(a, np.array(50), axis=1)
array([7., 2.])
>>> np.percentile(a, np.array(50), axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.percentile(a, np.array(50), axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, np.array(50), axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> m
array([6.5, 4.5, 2.5])
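A sketch of the `interpolation` options on a tiny sorted array (illustrative
only; the 40th percentile falls between the second and third elements):
>>> x = np.array([1., 2., 3., 4.])
>>> np.percentile(x, np.array(40), interpolation='linear')  # doctest: +SKIP
array(2.2)
>>> np.percentile(x, np.array(40), interpolation='lower')  # doctest: +SKIP
array(2.)
>>> np.percentile(x, np.array(40), interpolation='higher')  # doctest: +SKIP
array(3.)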
"""
return _mx_nd_np.percentile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@set_module('mxnet.numpy')
def median(a, axis=None, out=None, overwrite_input=None, keepdims=False):
r"""Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : {int, sequence of int, None}, optional
Axis or axes along which the medians are computed. The default
is to compute the median along a flattened version of the array.
A sequence of axes is supported since version 1.9.0.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the original `arr`.
Returns
-------
median : ndarray
A new array holding the result. If the input contains integers
or floats smaller than ``float32``, then the output data-type is
``np.float32``. Otherwise, the data-type of the output is the
same as that of the input. If `out` is specified, that array is
returned instead.
See Also
--------
mean, percentile
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10, 7, 4],
[ 3, 2, 1]])
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
array([6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
array([7., 2.])
"""
return _mx_nd_np.median(a, axis=axis, overwrite_input=overwrite_input,
keepdims=keepdims, out=out)
@set_module('mxnet.numpy')
def quantile(a, q, axis=None, out=None, overwrite_input=None, interpolation='linear', keepdims=False): # pylint: disable=too-many-arguments
"""Compute the q-th quantile of the data along the specified axis.
New in version 1.15.0.
Parameters
----------
a : ndarray
Input array or object that can be converted to an array.
q : ndarray
Quantile or sequence of quantiles to compute, which must be between 0 and 1 inclusive.
axis : {int, tuple of int, None}, optional
Axis or axes along which the quantiles are computed.
The default is to compute the quantile(s) along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result.
It must have the same shape and buffer length as the expected output,
but the type (of the output) will be cast if necessary.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use
when the desired quantile lies between two data points i < j:
* linear: i + (j - i) * fraction, where fraction is the fractional part of the index surrounded by i and j.
* lower: i.
* higher: j.
* nearest: i or j, whichever is nearest.
* midpoint: (i + j) / 2.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as dimensions with size one.
With this option, the result will broadcast correctly against the original array a.
Returns
-------
quantile : ndarray
If q is a single quantile and axis=None, then the result is a scalar.
If multiple quantiles are given, first axis of the result corresponds to the quantiles.
The other axes are the axes that remain after the reduction of a.
If out is specified, that array is returned instead.
See also
--------
mean
.. note::
Given a vector V of length N, the q-th quantile of V is the value q of the way from the minimum
to the maximum in a sorted copy of V. The values and distances of the two nearest neighbors
as well as the interpolation parameter will determine the quantile if the normalized ranking
does not match the location of q exactly. This function is the same as the median if q=0.5,
the same as the minimum if q=0.0 and the same as the maximum if q=1.0.
This function differs from the original `numpy.quantile
<https://numpy.org/devdocs/reference/generated/numpy.quantile.html>`_ in
the following aspects:
* q must be ndarray type even if it is a scalar
* do not support overwrite_input
Examples
--------
>>> a = np.array([[10, 7, 4], [3, 2, 1]])
>>> a
array([[10., 7., 4.],
[3., 2., 1.]])
>>> q = np.array(0.5)
>>> q
array(0.5)
>>> np.quantile(a, q)
array(3.5)
>>> np.quantile(a, q, axis=0)
array([6.5, 4.5, 2.5])
>>> np.quantile(a, q, axis=1)
array([7., 2.])
>>> np.quantile(a, q, axis=1, keepdims=True)
array([[7.],
[2.]])
>>> m = np.quantile(a, q, axis=0)
>>> out = np.zeros_like(m)
>>> np.quantile(a, q, axis=0, out=out)
array([6.5, 4.5, 2.5])
>>> out
array([6.5, 4.5, 2.5])
"""
return _mx_nd_np.quantile(a, q, axis=axis, out=out, overwrite_input=overwrite_input,
interpolation=interpolation, keepdims=keepdims)
@set_module('mxnet.numpy')
def shares_memory(a, b, max_work=None):
"""
Determine if two arrays share memory
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
may_share_memory
Examples
--------
>>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
False
.. note::
This function differs from the original `numpy.shares_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.shares_memory.html>`_ in
the following way(s):
* Does not support `max_work`, it is a dummy argument
* Actually it is same as `may_share_memory` in MXNet np
"""
return _mx_nd_np.shares_memory(a, b, max_work)
@set_module('mxnet.numpy')
def may_share_memory(a, b, max_work=None):
"""
Determine if two arrays might share memory
A return of True does not necessarily mean that the two arrays
share any element. It just means that they *might*.
Only the memory bounds of a and b are checked by default.
Parameters
----------
a, b : ndarray
Input arrays
Returns
-------
out : bool
See Also
--------
shares_memory
Examples
--------
>>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
False
>>> x = np.zeros([3, 4])
>>> np.may_share_memory(x[:,0], x[:,1])
True
.. note::
This function differs from the original `numpy.may_share_memory
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.may_share_memory.html>`_ in
the following way(s):
* Does not support `max_work`, it is a dummy argument
* Actually it is same as `shares_memory` in MXNet np
"""
return _mx_nd_np.may_share_memory(a, b, max_work)
@set_module('mxnet.numpy')
def diff(a, n=1, axis=-1, prepend=None, append=None): # pylint: disable=redefined-outer-name
r"""
Calculate the n-th discrete difference along the given axis.
Parameters
----------
a : ndarray
Input array
n : int, optional
The number of times values are differenced. If zero, the input is returned as-is.
axis : int, optional
The axis along which the difference is taken, default is the last axis.
prepend, append : ndarray, optional
Not supported yet
Returns
-------
diff : ndarray
The n-th differences.
The shape of the output is the same as a except along axis where the dimension is smaller by n.
The type of the output is the same as the type of the difference between any two elements of a.
This is the same as the type of a in most cases.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.diff(x)
array([ 1, 2, 3, -7])
>>> np.diff(x, n=2)
array([ 1, 1, -10])
>>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
>>> np.diff(x)
array([[2, 3, 4],
[5, 1, 2]])
>>> np.diff(x, axis=0)
array([[-1, 2, 0, -2]])
Notes
-----
Optional inputs `prepend` and `append` are not supported yet
"""
    if prepend is not None or append is not None:
raise NotImplementedError('prepend and append options are not supported yet')
return _mx_nd_np.diff(a, n=n, axis=axis)
@set_module('mxnet.numpy')
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
Parameters
----------
ary : ndarray
If necessary, will be flattened before the differences are taken.
to_end : ndarray or scalar, optional
Number(s) to append at the end of the returned differences.
to_begin : ndarray or scalar, optional
Number(s) to prepend at the beginning of the returned differences.
Returns
-------
ediff1d : ndarray
The differences. Loosely, this is ``ary.flat[1:] - ary.flat[:-1]``.
Examples
--------
>>> x = np.array([1, 2, 4, 7, 0])
>>> np.ediff1d(x)
array([ 1., 2., 3., -7.])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
    array([-99., 1., 2., 3., -7., 88., 99.])
The returned array is always 1D.
>>> y = np.array([[1, 2, 4], [1, 6, 24]])
>>> np.ediff1d(y)
array([ 1., 2., -3., 5., 18.])
>>> np.ediff1d(x, to_begin=y)
array([ 1., 2., 4., 1., 6., 24., 1., 2., 3., -7.])
"""
return _mx_nd_np.ediff1d(ary, to_end=to_end, to_begin=to_begin)
@set_module('mxnet.numpy')
def resize(a, new_shape):
"""
Return a new array with the specified shape.
If the new array is larger than the original array, then the new
array is filled with repeated copies of `a`. Note that this behavior
is different from a.resize(new_shape) which fills with zeros instead
of repeated copies of `a`.
Parameters
----------
a : ndarray
Array to be resized.
new_shape : int or tuple of int
Shape of resized array.
Returns
-------
reshaped_array : ndarray
The new array is formed from the data in the old array, repeated
if necessary to fill out the required number of elements. The
data are repeated in the order that they are stored in memory.
See Also
--------
ndarray.resize : resize an array in-place.
Notes
-----
Warning: This functionality does **not** consider axes separately,
i.e. it does not apply interpolation/extrapolation.
It fills the return array with the required number of elements, taken
from `a` as they are laid out in memory, disregarding strides and axes.
(This is in case the new shape is smaller. For larger, see above.)
This functionality is therefore not suitable to resize images,
or data where each axis represents a separate and distinct entity.
Examples
--------
>>> a = np.array([[0, 1], [2, 3]])
>>> np.resize(a, (2, 3))
array([[0., 1., 2.],
[3., 0., 1.]])
>>> np.resize(a, (1, 4))
array([[0., 1., 2., 3.]])
>>> np.resize(a,(2, 4))
array([[0., 1., 2., 3.],
[0., 1., 2., 3.]])
"""
return _mx_nd_np.resize(a, new_shape)
@set_module('mxnet.numpy')
def interp(x, xp, fp, left=None, right=None, period=None): # pylint: disable=too-many-arguments
r"""One-dimensional linear interpolation.
Returns the one-dimensional piecewise linear interpolant to a function
with given values at discrete data-points.
Parameters
----------
x : ndarray
The x-coordinates of the interpolated values.
xp : 1-D array of floats
The x-coordinates of the data points, must be increasing if argument
`period` is not specified. Otherwise, `xp` is internally sorted after
normalizing the periodic boundaries with ``xp = xp % period``.
fp : 1-D array of floats
The y-coordinates of the data points, same length as `xp`.
left : optional float corresponding to fp
Value to return for `x < xp[0]`, default is `fp[0]`.
right : optional float corresponding to fp
Value to return for `x > xp[-1]`, default is `fp[-1]`.
period : None or float, optional
A period for the x-coordinates. This parameter allows the proper
interpolation of angular x-coordinates. Parameters `left` and `right`
are ignored if `period` is specified.
Returns
-------
y : float (corresponding to fp) or ndarray
The interpolated values, same shape as `x`.
Raises
------
ValueError
If `xp` and `fp` have different length
If `xp` or `fp` are not 1-D sequences
If `period == 0`
.. note::
Does not check that the x-coordinate sequence `xp` is increasing.
If `xp` is not increasing, the results are nonsense.
A simple check for increasing is::
np.all(np.diff(xp) > 0)
Examples
--------
>>> xp = [1, 2, 3]
>>> fp = [3, 2, 0]
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
array([ 3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
Plot an interpolant to the sine function:
>>> x = np.linspace(0, 2*np.pi, 10)
>>> y = np.sin(x)
>>> xvals = np.linspace(0, 2*np.pi, 50)
>>> yinterp = np.interp(xvals, x, y)
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.plot(xvals, yinterp, '-x')
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.show()
Interpolation with periodic x-coordinates:
>>> x = [-180, -170, -185, 185, -10, -5, 0, 365]
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
"""
return _mx_nd_np.interp(x, xp, fp, left=left, right=right, period=period)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def full_like(a, fill_value, dtype=None, order='C', ctx=None, out=None): # pylint: disable=too-many-arguments
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
        Boolean type is temporarily not supported.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        Device context on which to create the array, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6, dtype=int)
>>> np.full_like(x, 1)
array([1, 1, 1, 1, 1, 1], dtype=int64)
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0], dtype=int64)
>>> np.full_like(x, 0.1, dtype=np.float64)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1], dtype=float64)
>>> np.full_like(x, np.nan, dtype=np.float64)
array([nan, nan, nan, nan, nan, nan], dtype=float64)
>>> y = np.arange(6, dtype=np.float32)
>>> np.full_like(y, 0.1)
array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
return _mx_nd_np.full_like(a, fill_value=fill_value, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def zeros_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
        Boolean type is temporarily not supported.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        Device context on which to create the array, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full : Return a new array of given shape filled with value.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.zeros_like(x)
array([[0., 0., 0.],
[0., 0., 0.]])
>>> np.zeros_like(x, int)
array([[0, 0, 0],
[0, 0, 0]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.zeros_like(y)
array([0., 0., 0.], dtype=float64)
"""
    return _mx_nd_np.full_like(a, fill_value=0, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def ones_like(a, dtype=None, order='C', ctx=None, out=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : ndarray
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
        Boolean type is temporarily not supported.
order : {'C'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory. Currently only supports C order.
    ctx : Context, optional
        Device context on which to create the array, e.g. the i-th GPU.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
out : ndarray
Array of ones with the same shape and type as a.
See Also
--------
empty_like : Return an empty array with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
full_like : Return a new array with shape of input filled with value.
ones : Return a new array setting values to one.
Examples
--------
>>> x = np.arange(6)
>>> x = x.reshape((2, 3))
>>> x
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.ones_like(x)
array([[1., 1., 1.],
[1., 1., 1.]])
>>> np.ones_like(x, int)
array([[1, 1, 1],
[1, 1, 1]], dtype=int64)
>>> y = np.arange(3, dtype=float)
>>> y
array([0., 1., 2.], dtype=float64)
>>> np.ones_like(y)
array([1., 1., 1.], dtype=float64)
"""
return _mx_nd_np.full_like(a, fill_value=1, dtype=dtype, order=order, ctx=ctx, out=out)
# pylint: enable=redefined-outer-name
@set_module('mxnet.numpy')
def fill_diagonal(a, val, wrap=False):
"""
Fill the main diagonal of the given array of any dimensionality.
For an array `a` with ``a.ndim >= 2``, the diagonal is the list of
locations with indices ``a[i, ..., i]`` all identical. This function
modifies the input array in-place, it does not return a value.
Parameters
----------
a : array, at least 2-D.
Array whose diagonal is to be filled, it gets modified in-place.
val : scalar
Value to be written on the diagonal, its type must be compatible with
that of the array a.
wrap : bool
For tall matrices in NumPy version up to 1.6.2, the
diagonal "wrapped" after N columns. You can have this behavior
with this option. This affects only tall matrices.
Examples
--------
>>> a = np.zeros((3, 3), int)
>>> np.fill_diagonal(a, 5)
>>> a
array([[5, 0, 0],
[0, 5, 0],
[0, 0, 5]])
The same function can operate on a 4-D array:
>>> a = np.zeros((3, 3, 3, 3), int)
>>> np.fill_diagonal(a, 4)
We only show a few blocks for clarity:
>>> a[0, 0]
array([[4, 0, 0],
[0, 0, 0],
[0, 0, 0]])
>>> a[1, 1]
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 0]])
>>> a[2, 2]
array([[0, 0, 0],
[0, 0, 0],
[0, 0, 4]])
The wrap option affects only tall matrices:
>>> # tall matrices no wrap
>>> a = np.zeros((5, 3), int)
>>> np.fill_diagonal(a, 4)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[0, 0, 0]])
>>> # tall matrices wrap
>>> a = np.zeros((5, 3), int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0],
[0, 4, 0],
[0, 0, 4],
[0, 0, 0],
[4, 0, 0]])
>>> # wide matrices
>>> a = np.zeros((3, 5), int)
>>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 4, 0, 0]])
The anti-diagonal can be filled by reversing the order of elements
using either `numpy.flipud` or `numpy.fliplr`.
>>> a = np.zeros((3, 3), int);
>>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
>>> a
array([[0, 0, 1],
[0, 2, 0],
[3, 0, 0]])
>>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
>>> a
array([[0, 0, 3],
[0, 2, 0],
[1, 0, 0]])
Note that the order in which the diagonal is filled varies depending
on the flip function.
"""
_mx_nd_np.fill_diagonal(a, val=val, wrap=wrap)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None, **kwargs):
"""
Replace NaN with zero and infinity with large finite numbers (default
behaviour) or with the numbers defined by the user using the `nan`,
`posinf` and/or `neginf` keywords.
If `x` is inexact, NaN is replaced by zero or by the user defined value in
`nan` keyword, infinity is replaced by the largest finite floating point
values representable by ``x.dtype`` or by the user defined value in
`posinf` keyword and -infinity is replaced by the most negative finite
floating point values representable by ``x.dtype`` or by the user defined
value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
If `x` is not inexact, then no replacements are made.
Parameters
----------
    x : scalar or ndarray
        Input data.
copy : bool, optional
Whether to create a copy of `x` (True) or to replace values
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
Gluon does not support copy = False.
nan : int, float, optional
Value to be used to fill NaN values. If no value is passed
then NaN values will be replaced with 0.0.
posinf : int, float, optional
Value to be used to fill positive infinity values. If no value is
passed then positive infinity values will be replaced with a very
large number.
neginf : int, float, optional
Value to be used to fill negative infinity values. If no value is
passed then negative infinity values will be replaced with a very
small (or negative) number.
.. versionadded:: 1.13
Returns
-------
out : ndarray
`x`, with the non-finite values replaced. If `copy` is False, this may
be `x` itself.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
(IEEE 754). This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.nan_to_num(np.inf)
1.7976931348623157e+308
>>> np.nan_to_num(-np.inf)
-1.7976931348623157e+308
>>> np.nan_to_num(np.nan)
0.0
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
array([ 3.4028235e+38, -3.4028235e+38, 0.0000000e+00, -1.2800000e+02,
1.2800000e+02])
>>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
array([ 3.3333332e+07, 3.3333332e+07, -9.9990000e+03, -1.2800000e+02,
1.2800000e+02])
    >>> y = np.array([[-1, 0, 1],[9999,234,-14222]],dtype="float64")/0
    >>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y)
array([[-1.79769313e+308, 0.00000000e+000, 1.79769313e+308],
[ 1.79769313e+308, 1.79769313e+308, -1.79769313e+308]], dtype=float64)
>>> np.nan_to_num(y, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-inf, nan, inf],
[ inf, inf, -inf]], dtype=float64)
>>> np.nan_to_num(y, copy=False, nan=111111, posinf=222222)
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
>>> y
array([[-1.79769313e+308, 1.11111000e+005, 2.22222000e+005],
[ 2.22222000e+005, 2.22222000e+005, -1.79769313e+308]], dtype=float64)
"""
return _mx_nd_np.nan_to_num(x, copy=copy, nan=nan, posinf=posinf, neginf=neginf)
@set_module('mxnet.numpy')
def squeeze(x, axis=None):
r"""Remove single-dimensional entries from the shape of an array.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Selects a subset of the single-dimensional entries in the
shape. If an axis is selected with shape entry greater than
one, an error is raised.
Returns
-------
squeezed : ndarray
The input array, but with all or a subset of the
dimensions of length 1 removed. This is always `a` itself
or a view into `a`.
Raises
------
ValueError
If `axis` is not `None`, and an axis being squeezed is not of length 1
See Also
--------
expand_dims : The inverse operation, adding singleton dimensions
reshape : Insert, remove, and combine dimensions, and resize existing ones
Examples
--------
>>> x = np.array([[[0], [1], [2]]])
>>> x.shape
(1, 3, 1)
>>> np.squeeze(x).shape
(3,)
>>> np.squeeze(x, axis=0).shape
(3, 1)
>>> np.squeeze(x, axis=1).shape
Traceback (most recent call last):
...
ValueError: cannot select an axis to squeeze out which has size not equal to one
>>> np.squeeze(x, axis=2).shape
(1, 3)
"""
return _mx_nd_np.squeeze(x, axis=axis)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isnan(x, out=None, **kwargs):
"""
Test element-wise for NaN and return result as a boolean array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is NaN, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
.. note::
        This function differs from the original `numpy.isnan
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.isnan.html>`_ in
the following aspects:
* Does not support complex number for now
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> np.isnan(np.nan)
True
>>> np.isnan(np.inf)
False
>>> np.isnan(np.array([np.log(-1.),1.,np.log(0)]))
array([ True, False, False])
"""
return _mx_nd_np.isnan(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isinf(x, out=None, **kwargs):
"""
Test element-wise for positive or negative infinity.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive or negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
.. note::
        This function differs from the original `numpy.isinf
        <https://docs.scipy.org/doc/numpy/reference/generated/numpy.isinf.html>`_ in
the following aspects:
* Does not support complex number for now
* Input type does not support Python native iterables(list, tuple, ...).
* ``out`` param: cannot perform auto broadcasting. ``out`` ndarray's shape must be
the same as the expected output.
* ``out`` param: cannot perform auto type cast. ``out`` ndarray's dtype must be the
same as the expected output.
* ``out`` param does not support scalar input case.
Examples
--------
>>> np.isinf(np.inf)
True
>>> np.isinf(np.nan)
False
>>> np.isinf(np.array([np.inf, -np.inf, 1.0, np.nan]))
array([ True, True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool_)
>>> np.isinf(x, y)
array([ True, False, True])
>>> y
array([ True, False, True])
"""
return _mx_nd_np.isinf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isposinf(x, out=None, **kwargs):
"""
Test element-wise for positive infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is positive infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isposinf(np.inf)
True
>>> np.isposinf(-np.inf)
False
>>> np.isposinf(np.nan)
False
>>> np.isposinf(np.array([-np.inf, 0., np.inf]))
array([False, False, True])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isposinf(x, y)
array([False, False, True])
>>> y
array([False, False, True])
"""
return _mx_nd_np.isposinf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isneginf(x, out=None, **kwargs):
"""
Test element-wise for negative infinity, return result as bool array.
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
True where x is negative infinity, false otherwise.
This is a scalar if x is a scalar.
Notes
-----
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Examples
--------
>>> np.isneginf(-np.inf)
True
>>> np.isneginf(np.inf)
False
>>> np.isneginf(float('-inf'))
True
>>> np.isneginf(np.array([-np.inf, 0., np.inf]))
array([ True, False, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isneginf(x, y)
array([ True, False, False])
>>> y
array([ True, False, False])
"""
return _mx_nd_np.isneginf(x, out=out, **kwargs)
@set_module('mxnet.numpy')
@wrap_np_unary_func
def isfinite(x, out=None, **kwargs):
"""
Test element-wise for finiteness (not infinity or not Not a Number).
Parameters
----------
x : ndarray
Input array.
out : ndarray or None, optional
A location into which the result is stored.
If provided, it must have the same shape and dtype as input ndarray.
If not provided or `None`, a freshly-allocated array is returned.
Returns
-------
y : ndarray or bool
        True where x is finite (not NaN and not infinity), false otherwise.
This is a scalar if x is a scalar.
Notes
-----
Not a Number, positive infinity and negative infinity are considered to be non-finite.
NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic (IEEE 754).
This means that Not a Number is not equivalent to infinity.
Also that positive infinity is not equivalent to negative infinity.
But infinity is equivalent to positive infinity. Errors result if the second argument
is also supplied when x is a scalar input, or if first and second arguments have different shapes.
Examples
--------
>>> np.isfinite(1)
True
>>> np.isfinite(0)
True
>>> np.isfinite(np.nan)
False
>>> np.isfinite(np.inf)
False
>>> np.isfinite(-np.inf)
False
>>> np.isfinite(np.array([np.log(-1.),1.,np.log(0)]))
array([False, True, False])
>>> x = np.array([-np.inf, 0., np.inf])
>>> y = np.array([True, True, True], dtype=np.bool)
>>> np.isfinite(x, y)
array([False, True, False])
>>> y
array([False, True, False])
"""
return _mx_nd_np.isfinite(x, out=out, **kwargs)
@set_module('mxnet.numpy')
def where(condition, x=None, y=None):
"""where(condition, [x, y])
Return elements chosen from `x` or `y` depending on `condition`.
.. note::
When only `condition` is provided, this function is a shorthand for
``np.asarray(condition).nonzero()``. The rest of this documentation
covers only the case where all three arguments are provided.
Parameters
----------
condition : ndarray
Where True, yield `x`, otherwise yield `y`.
x, y : ndarray
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape. `x` and `y` must have the same dtype.
Returns
-------
out : ndarray
An array with elements from `x` where `condition` is True, and elements
from `y` elsewhere.
Notes
-----
If all the arrays are 1-D, `where` is equivalent to::
[xv if c else yv
for c, xv, yv in zip(condition, x, y)]
Examples
--------
>>> a = np.arange(10)
>>> a
array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.where(a < 5, a, 10*a)
array([ 0., 1., 2., 3., 4., 50., 60., 70., 80., 90.])
This can be used on multidimensional arrays too:
>>> cond = np.array([[True, False], [True, True]])
>>> x = np.array([[1, 2], [3, 4]])
>>> y = np.array([[9, 8], [7, 6]])
>>> np.where(cond, x, y)
array([[1., 8.],
[3., 4.]])
The shapes of x, y, and the condition are broadcast together:
>>> x, y = onp.ogrid[:3, :4]
>>> x = np.array(x)
>>> y = np.array(y)
>>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
array([[10, 0, 0, 0],
[10, 11, 1, 1],
[10, 11, 12, 2]], dtype=int64)
>>> a = np.array([[0, 1, 2],
... [0, 2, 4],
... [0, 3, 6]])
>>> np.where(a < 4, a, -1) # -1 is broadcast
array([[ 0., 1., 2.],
[ 0., 2., -1.],
[ 0., 3., -1.]])
"""
return _mx_nd_np.where(condition, x, y)
@set_module('mxnet.numpy')
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If p is of length N, this function returns the value:
p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]
If x is a sequence, then p(x) is returned for each element of x.
If x is another polynomial then the composite polynomial p(x(t)) is returned.
Parameters
----------
p : ndarray
1D array of polynomial coefficients (including coefficients equal to zero)
from highest degree to the constant term.
x : ndarray
An array of numbers, at which to evaluate p.
Returns
-------
values : ndarray
Result array of polynomials
.. note::
This function differs from the original `numpy.polyval
<https://numpy.org/devdocs/reference/generated/numpy.polyval.html>`_ in
the following way(s):
* Does not support poly1d.
* X should be ndarray type even if it contains only one element.
Examples
--------
>>> p = np.array([3, 0, 1])
array([3., 0., 1.])
>>> x = np.array([5])
array([5.])
>>> np.polyval(p, x) # 3 * 5**2 + 0 * 5**1 + 1
array([76.])
>>> x = np.array([5, 4])
array([5., 4.])
>>> np.polyval(p, x)
array([76., 49.])
"""
return _mx_nd_np.polyval(p, x)
@set_module('mxnet.numpy')
def bincount(x, weights=None, minlength=0):
"""
Count number of occurrences of each value in array of non-negative ints.
Parameters
----------
x : ndarray
input array, 1 dimension, nonnegative ints.
    weights : ndarray, optional
        Weights, array of the same shape as x.
    minlength : int, optional
        A minimum number of bins for the output.
Returns
--------
out : ndarray
the result of binning the input array. The length of out is equal to amax(x)+1.
Raises
--------
    ValueError
        If the input is not 1-dimensional, or contains elements with negative values,
        or if minlength is negative.
TypeError
If the type of the input is float or complex.
Examples
--------
>>> np.bincount(np.arange(5))
array([1, 1, 1, 1, 1])
>>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
array([1, 3, 1, 1, 0, 0, 0, 1])
>>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
>>> np.bincount(x).size == np.amax(x)+1
True
>>> np.bincount(np.arange(5, dtype=float))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: array cannot be safely cast to required type
>>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
>>> x = np.array([0, 1, 1, 2, 2, 2])
>>> np.bincount(x, weights=w)
array([ 0.3, 0.7, 1.1])
"""
return _mx_nd_np.bincount(x, weights=weights, minlength=minlength)
@set_module('mxnet.numpy')
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
Scalar inputs are converted to 1-dimensional arrays, whilst higher-dimensional inputs are preserved.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 1. Copies are made only if necessary.
See also
--------
atleast_2d, atleast_3d
Examples
--------
>>> np.atleast_1d(1.0)
array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
array([[0., 1., 2.],
[3., 4., 5.],
[6., 7., 8.]])
>>> np.atleast_1d(np.array(1), np.array([3, 4]))
[array([1.]), array([3., 4.])]
"""
res = []
for ary in arys:
if not isinstance(ary, NDArray):
ary = array(ary)
res.append(ary)
return _mx_nd_np.atleast_1d(*res)
@set_module('mxnet.numpy')
def atleast_2d(*arys):
"""
Convert inputs to arrays with at least two dimensions.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 2. Copies are made only if necessary.
See also
--------
atleast_1d, atleast_3d
Examples
--------
>>> np.atleast_2d(3.0)
array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
array([[0., 1., 2.]])
>>> np.atleast_2d(np.array(1), np.array([1, 2]), np.array([[1, 2]]))
[array([[1.]]), array([[1., 2.]]), array([[1., 2.]])]
"""
res = []
for ary in arys:
if not isinstance(ary, NDArray):
ary = array(ary)
res.append(ary)
return _mx_nd_np.atleast_2d(*res)
@set_module('mxnet.numpy')
def atleast_3d(*arys):
"""
Convert inputs to arrays with at least three dimension.
Parameters
----------
arys1, arys2, ... : ndarray
One or more input arrays.
Returns
-------
ret : ndarray
An array, or list of arrays, each with a.ndim >= 3.
For example, a 1-D array of shape (N,) becomes a view of shape (1, N, 1),
and a 2-D array of shape (M, N) becomes a view of shape (M, N, 1).
See also
--------
atleast_1d, atleast_2d
Examples
--------
>>> np.atleast_3d(3.0)
array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
(1, 3, 1)
>>> x = np.arange(12.0).reshape(4,3)
>>> np.atleast_3d(x).shape
(4, 3, 1)
>>> for arr in np.atleast_3d(np.array([1, 2]), np.array([[1, 2]]), np.array([[[1, 2]]])):
... print(arr, arr.shape)
...
[[[1.]
[2.]]] (1, 2, 1)
[[[1.]
[2.]]] (1, 2, 1)
[[[1. 2.]]] (1, 1, 2)
"""
res = []
for ary in arys:
if not isinstance(ary, NDArray):
ary = array(ary)
res.append(ary)
return _mx_nd_np.atleast_3d(*res)
@set_module('mxnet.numpy')
def pad(x, pad_width=None, mode="constant", **kwargs): # pylint: disable=too-many-arguments
# pylint: disable=too-many-return-statements
"""
Pad an array.
Parameters
----------
array : array_like of rank N
The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
for each axis.
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
mode : str or function, optional
One of the following string values or a user supplied function.
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
not supported yet
'maximum'
Pads with the maximum value of all of the
vector along each axis.
'mean'
not supported yet
'median'
not supported yet
'minimum'
Pads with the minimum value of all of the
vector along each axis.
'reflect'
Pads with the reflection of the vector mirrored on
the first and last values of the vector along each
axis.
'symmetric'
Pads with the reflection of the vector mirrored
along the edge of the array.
'wrap'
not supported yet.
'empty'
not supported yet.
<function>
not supported yet.
stat_length : not supported yet
constant_values : scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
Default is 0.
end_values : not supported yet
reflect_type : {'even', 'odd'}, optional
only support even now
Returns
-------
pad : ndarray
Padded array of rank equal to `array` with shape increased
according to `pad_width`.
Examples
--------
>>> a = [1, 2, 3, 4, 5]
>>> np.pad(a, (2, 3), 'edge')
array([1, 1, 1, ..., 5, 5, 5])
>>> np.pad(a, (2, 2), 'maximum')
array([5, 5, 1, 2, 3, 4, 5, 5, 5])
>>> np.pad(a, (2, 2), 'mean')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> a = [[1, 2], [3, 4]]
>>> np.pad(a, ((3, 2), (2, 3)), 'minimum')
array([[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 3],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1]])
>>> a = [1, 2, 3, 4, 5]
>>> np.pad(a, (2, 3), 'reflect')
array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
>>> np.pad(a, (2, 3), 'symmetric')
array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
    >>> np.pad(a, ((2, 2), (2, 2)), 'constant', constant_values=10)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
"""
return _mx_nd_np.pad(x, pad_width=pad_width, mode=mode, **kwargs)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def prod(a, axis=None, dtype=None, out=None, keepdims=False, initial=None): # pylint: disable=too-many-arguments
"""
Return the product of array elements over a given axis.
Parameters
----------
a : array_like
Input data.
axis : None or int or tuple of ints, optional
Axis or axes along which a product is performed. The default,
axis=None, will calculate the product of all the elements in the
input array. If axis is negative it counts from the last to the
first axis.
.. versionadded:: 1.7.0
If axis is a tuple of ints, a product is performed on all of the
axes specified in the tuple instead of a single axis or all the
axes as before.
dtype : dtype, optional
The type of the returned array, as well as of the accumulator in
which the elements are multiplied. The dtype of `a` is used by
default unless `a` has an integer dtype of less precision than the
default platform integer. In that case, if `a` is signed then the
platform integer is used while if `a` is unsigned then an unsigned
integer of the same precision as the platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must have
the same shape as the expected output, but the type of the output
values will be cast if necessary.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the
result as dimensions with size one. With this option, the result
will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `prod` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-class' method does not implement `keepdims` any
exceptions will be raised.
initial : scalar, optional
The starting value for this product. See `~numpy.ufunc.reduce` for details.
where : not supported
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
An array shaped as `a` but with the specified axis removed.
Returns a reference to `out` if specified.
Examples
--------
By default, calculate the product of all elements:
>>> np.prod([1.,2.])
2.0
Even when the input array is two-dimensional:
>>> np.prod([[1.,2.],[3.,4.]])
24.0
But we can also specify the axis over which to multiply:
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
Or select specific elements to include:
>>> np.prod([1., np.nan, 3.], where=[True, False, True])
3.0
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
>>> x = np.array([1, 2, 3], dtype=np.uint8)
>>> np.prod(x).dtype == np.uint
True
If `x` is of a signed integer type, then the output type
is the default platform integer:
>>> x = np.array([1, 2, 3], dtype=np.int8)
>>> np.prod(x).dtype == int
True
You can also start the product with a value other than one:
>>> np.prod([1, 2], initial=5)
10
"""
return _mx_nd_np.prod(a, axis=axis, dtype=dtype, keepdims=keepdims, initial=initial, out=out)
@set_module('mxnet.numpy')
def dot(a, b, out=None):
"""
Dot product of two arrays. Specifically,
* If both `a` and `b` are 1-D arrays, it is inner product of vectors
* If both `a` and `b` are 2-D arrays, it is matrix multiplication,
* If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
and using ``np.multiply(a, b)`` or ``a * b`` is preferred.
* If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
the last axis of `a` and `b`.
* If `a` is an N-D array and `b` is a 2-D array, it is a
sum product over the last axis of `a` and the second-to-last axis of `b`::
dot(a, b)[i,j,k] = sum(a[i,j,:] * b[:,k])
Parameters
----------
a : ndarray
First argument.
b : ndarray
Second argument.
out : ndarray, optional
Output argument. It must have the same shape and type as the expected output.
Returns
-------
output : ndarray
Returns the dot product of `a` and `b`. If `a` and `b` are both
scalars or both 1-D arrays then a scalar is returned; otherwise
an array is returned.
If `out` is given, then it is returned
Examples
--------
>>> a = np.array(3)
>>> b = np.array(4)
>>> np.dot(a, b)
array(12.)
For 2-D arrays it is the matrix product:
>>> a = np.array([[1, 0], [0, 1]])
>>> b = np.array([[4, 1], [2, 2]])
>>> np.dot(a, b)
array([[4., 1.],
[2., 2.]])
>>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
>>> b = np.arange(5*6)[::-1].reshape((6,5))
>>> np.dot(a, b)[2,3,2,2]
array(29884.)
>>> np.sum(a[2,3,2,:] * b[:,2])
array(29884.)
"""
return _mx_nd_np.dot(a, b, out=out)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
Parameters
----------
a : array_like
Input array.
axis : int, optional
Axis along which the cumulative sum is computed. The default
(None) is to compute the cumsum over the flattened array.
dtype : dtype, optional
Type of the returned array and of the accumulator in which the
elements are summed. If `dtype` is not specified, it defaults
to the dtype of `a`, unless `a` has an integer dtype with a
precision less than that of the default platform integer. In
that case, the default platform integer is used.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary. See `doc.ufuncs`
(Section "Output arguments") for more details.
Returns
-------
cumsum_along_axis : ndarray.
A new array holding the result is returned unless `out` is
specified, in which case a reference to `out` is returned. The
result has the same size as `a`, and the same shape as `a` if
`axis` is not None or `a` is a 1-d array.
Examples
--------
>>> a = np.array([[1,2,3], [4,5,6]])
>>> a
array([[1, 2, 3],
[4, 5, 6]])
>>> np.cumsum(a)
array([ 1, 3, 6, 10, 15, 21])
>>> np.cumsum(a, dtype=float) # specifies type of output value(s)
array([ 1., 3., 6., 10., 15., 21.])
>>> np.cumsum(a,axis=0) # sum over rows for each of the 3 columns
array([[1, 2, 3],
[5, 7, 9]])
>>> np.cumsum(a,axis=1) # sum over columns for each of the 2 rows
array([[ 1, 3, 6],
[ 4, 9, 15]])
"""
return _mx_nd_np.cumsum(a, axis=axis, dtype=dtype, out=out)
@set_module('mxnet.numpy')
def reshape(a, newshape, reverse=False, order='C'):
"""
Gives a new shape to an array without changing its data.
This function always returns a copy of the input array if
``out`` is not provided.
Parameters
----------
a : ndarray
Array to be reshaped.
newshape : int or tuple of ints
The new shape should be compatible with the original shape. If
an integer, then the result will be a 1-D array of that length.
One shape dimension can be -1. In this case, the value is
inferred from the length of the array and remaining dimensions.
order : {'C'}, optional
Read the elements of `a` using this index order, and place the
elements into the reshaped array using this index order. 'C'
means to read / write the elements using C-like index order,
with the last axis index changing fastest, back to the first
axis index changing slowest. Other order types such as 'F'/'A'
may be added in the future.
Returns
-------
reshaped_array : ndarray
It will be always a copy of the original array. This behavior is different
from the official NumPy ``reshape`` operator where views of the original array may be
generated.
See Also
--------
ndarray.reshape : Equivalent method.
Examples
--------
>>> a = np.arange(6).reshape((3, 2))
>>> a
array([[0., 1.],
[2., 3.],
[4., 5.]])
>>> np.reshape(a, (2, 3)) # C-like index ordering
array([[0., 1., 2.],
[3., 4., 5.]])
>>> np.reshape(np.ravel(a), (2, 3)) # equivalent to C ravel then C reshape
array([[0., 1., 2.],
[3., 4., 5.]])
>>> a = np.array([[1,2,3], [4,5,6]])
>>> np.reshape(a, 6)
array([1., 2., 3., 4., 5., 6.])
>>> np.reshape(a, (3,-1)) # the unspecified value is inferred to be 2
array([[1., 2.],
[3., 4.],
[5., 6.]])
"""
return _mx_nd_np.reshape(a, newshape, reverse, order)
@set_module('mxnet.numpy')
def moveaxis(a, source, destination):
"""Move axes of an array to new positions.
Other axes remain in their original order.
Parameters
----------
a : ndarray
The array whose axes should be reordered.
source : int or sequence of int
Original positions of the axes to move. These must be unique.
destination : int or sequence of int
Destination positions for each of the original axes. These must also be
unique.
Returns
-------
result : ndarray
Array with moved axes. This array is a view of the input array.
See Also
--------
transpose: Permute the dimensions of an array.
swapaxes: Interchange two axes of an array.
Examples
--------
>>> x = np.zeros((3, 4, 5))
>>> np.moveaxis(x, 0, -1).shape
(4, 5, 3)
>>> np.moveaxis(x, -1, 0).shape
(5, 3, 4)
These all achieve the same result:
>>> np.transpose(x).shape
(5, 4, 3)
>>> np.swapaxes(x, 0, -1).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1], [-1, -2]).shape
(5, 4, 3)
>>> np.moveaxis(x, [0, 1, 2], [-1, -2, -3]).shape
(5, 4, 3)
"""
return _mx_nd_np.moveaxis(a, source, destination)
@set_module('mxnet.numpy')
def copy(a): # pylint: disable=redefined-outer-name
"""
Return an array copy of the given object.
Parameters
----------
    a : ndarray
Input array.
Returns
-------
    arr : ndarray
Array interpretation of a.
Examples
--------
>>> x = np.array([1, 2, 3])
>>> y = x
>>> z = np.copy(x)
>>> x[0] = 10
>>> x[0] == y[0]
True
>>> x[0] == z[0]
False
"""
return _mx_nd_np.copy(a)
# pylint: disable=redefined-outer-name
@set_module('mxnet.numpy')
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
Parameters
----------
a : ndarray
Input array.
axis : integer
The axis to roll backwards. The positions of the other axes do not
change relative to one another.
start: int, optional
The axis is rolled until it lies before this position.
The default, 0, results in a “complete” roll.
Returns
-------
res : ndarray
A view after applying rollaxis to `a` is returned.
Examples
--------
>>> a = np.ones((3,4,5,6))
>>> np.rollaxis(a, 3, 1).shape
(3, 6, 4, 5)
>>> np.rollaxis(a, 2).shape
(5, 3, 4, 6)
>>> np.rollaxis(a, 1, 4).shape
(3, 5, 6, 4)
"""
return _mx_nd_np.rollaxis(a, axis, start)
@set_module('mxnet.numpy')
def diag(v, k=0):
"""
Extracts a diagonal or constructs a diagonal array.
* 1-D arrays: constructs a 2-D array with the input as its diagonal, all other elements are zero.
* 2-D arrays: extracts the k-th Diagonal
Parameters
----------
array : ndarray
The array to apply diag method.
k : offset
extracts or constructs kth diagonal given input array
Returns
----------
out : ndarray
The extracted diagonal or constructed diagonal array.
Examples
--------
>>> x = np.arange(9).reshape((3,3))
>>> x
array([[0, 1, 2],
[3, 4, 5],
[6, 7, 8]])
>>> np.diag(x)
array([0, 4, 8])
>>> np.diag(x, k=1)
array([1, 5])
>>> np.diag(x, k=-1)
array([3, 7])
>>> np.diag(np.diag(x))
array([[0, 0, 0],
[0, 4, 0],
[0, 0, 8]])
"""
return _mx_nd_np.diag(v, k=k)
@set_module('mxnet.numpy')
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
Parameters
----------
v : array_like
Input data, which is flattened and set as the `k`-th
diagonal of the output.
k : int, optional
Diagonal to set; 0, the default, corresponds to the "main" diagonal,
a positive (negative) `k` giving the number of the diagonal above
(below) the main.
Returns
-------
out : ndarray
The 2-D output array.
See Also
--------
diag : MATLAB work-alike for 1-D and 2-D arrays.
diagonal : Return specified diagonals.
trace : Sum along diagonals.
Examples
--------
>>> np.diagflat([[1,2], [3,4]])
array([[1, 0, 0, 0],
[0, 2, 0, 0],
[0, 0, 3, 0],
[0, 0, 0, 4]])
>>> np.diagflat([1,2], 1)
array([[0, 1, 0],
[0, 0, 2],
[0, 0, 0]])
"""
return _mx_nd_np.diagflat(v, k=k)
@set_module('mxnet.numpy')
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
If a is 2-D, returns the diagonal of a with the given offset, i.e., the collection of elements of
the form a[i, i+offset]. If a has more than two dimensions, then the axes specified by axis1 and
axis2 are used to determine the 2-D sub-array whose diagonal is returned. The shape of the
resulting array can be determined by removing axis1 and axis2 and appending an index to the
right equal to the size of the resulting diagonals.
Parameters
----------
a : ndarray
Input data from which diagonal are taken.
    offset : int, optional
        Offset of the diagonal from the main diagonal.
    axis1 : int, optional
        Axis to be used as the first axis of the 2-D sub-arrays.
    axis2 : int, optional
        Axis to be used as the second axis of the 2-D sub-arrays.
Returns
-------
out : ndarray
Output result
Raises
-------
ValueError: If the dimension of a is less than 2.
Examples
--------
>>> a = np.arange(4).reshape(2,2)
>>> a
array([[0, 1],
[2, 3]])
>>> np.diagonal(a)
array([0, 3])
>>> np.diagonal(a, 1)
array([1])
>>> a = np.arange(8).reshape(2,2,2)
    >>> a
array([[[0, 1],
[2, 3]],
[[4, 5],
[6, 7]]])
>>> np.diagonal(a, 0, 0, 1)
array([[0, 6],
[1, 7]])
"""
return _mx_nd_np.diagonal(a, offset=offset, axis1=axis1, axis2=axis2)
# pylint: disable=redefined-outer-name, too-many-arguments
@set_module('mxnet.numpy')
def sum(a, axis=None, dtype=None, out=None, keepdims=None, initial=None, where=None):
r"""
Sum of array elements over a given axis.
Parameters
----------
a : ndarray
Input data.
axis : None or int, optional
Axis or axes along which a sum is performed. The default,
axis=None, will sum all of the elements of the input array. If
axis is negative it counts from the last to the first axis.
dtype : dtype, optional
The type of the returned array and of the accumulator in which the
elements are summed. The default type is float32.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
If the default value is passed, then `keepdims` will not be
passed through to the `sum` method of sub-classes of
`ndarray`, however any non-default value will be. If the
sub-classes `sum` method does not implement `keepdims` any
exceptions will be raised.
    initial : scalar, optional
        Starting value for the sum.
        Currently not implemented; please use ``None`` as input or skip this argument.
out : ndarray or None, optional
Alternative output array in which to place the result. It must have
the same shape and dtype as the expected output.
Returns
-------
sum_along_axis : ndarray
An ndarray with the same shape as `a`, with the specified
axis removed. If an output array is specified, a reference to
`out` is returned.
Notes
-----
* Input type does not support Python native iterables.
* "out" param: cannot perform auto type change. out ndarray's dtype must be the same as the expected output.
* "initial" param is not supported yet. Please use None as input.
* Arithmetic is modular when using integer types, and no error is raised on overflow.
* The sum of an empty array is the neutral element 0:
>>> a = np.empty(1)
>>> np.sum(a)
array(0.)
This function differs from the original `numpy.sum
<https://docs.scipy.org/doc/numpy/reference/generated/numpy.sum.html>`_ in
the following aspects:
* Input type does not support Python native iterables(list, tuple, ...).
* "out" param: cannot perform auto type cast. out ndarray's dtype must be the same as the expected output.
* "initial" param is not supported yet. Please use ``None`` as input or skip it.
* The default type is float32.
Examples
--------
>>> a = np.array([0.5, 1.5])
>>> np.sum(a)
array(2.)
>>> a = np.array([0.5, 0.7, 0.2, 1.5])
>>> np.sum(a, dtype=np.int32)
array(2, dtype=int32)
>>> a = np.array([[0, 1], [0, 5]])
>>> np.sum(a)
array(6.)
>>> np.sum(a, axis=0)
array([0., 6.])
>>> np.sum(a, axis=1)
array([1., 5.])
With output ndarray:
>>> a = np.array([[0, 1], [0, 5]])
>>> b = np.ones((2,), dtype=np.float32)
>>> np.sum(a, axis = 0, out=b)
array([0., 6.])
>>> b
array([0., 6.])
If the accumulator is too small, overflow occurs:
>>> np.ones(128, dtype=np.int8).sum(dtype=np.int8)
array(-128, dtype=int8)
"""
return _mx_nd_np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims, initial=initial, where=where)
# pylint: enable=redefined-outer-name, too-many-arguments
| leezu/mxnet | python/mxnet/numpy/multiarray.py | Python | apache-2.0 | 408,331 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data access object for InstanceTemplate."""
from google.cloud.security.common.data_access import dao
from google.cloud.security.common.data_access.sql_queries import select_data
from google.cloud.security.common.gcp_type import instance_template
from google.cloud.security.common.gcp_type import resource
from google.cloud.security.common.util import log_util
# TODO: The next editor must remove this disable and correct issues.
# pylint: disable=missing-type-doc,missing-return-type-doc
LOGGER = log_util.get_logger(__name__)
class InstanceTemplateDao(dao.Dao):
"""InstanceTemplate DAO."""
def get_instance_templates(self, timestamp):
"""Get instance templates from a particular snapshot.
Args:
timestamp: The snapshot timestamp.
Returns:
A list of InstanceTemplate.
Raises:
MySQLError if a MySQL error occurs.
"""
query = select_data.INSTANCE_TEMPLATES.format(timestamp)
rows = self.execute_sql_with_fetch(
resource.ResourceType.INSTANCE_TEMPLATE, query, ())
return [self.map_row_to_object(instance_template.InstanceTemplate, row)
for row in rows]
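# A minimal usage sketch (illustrative only; the constructor argument and the
# attributes read from the returned InstanceTemplate objects are assumptions,
# not part of this module; see dao.Dao and gcp_type.instance_template for the
# actual interfaces):
#
#     instance_template_dao = InstanceTemplateDao(global_configs)
#     templates = instance_template_dao.get_instance_templates('20170901T120000Z')
#     for template in templates:
#         LOGGER.info('Found instance template: %s', template.name)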
| cschnei3/forseti-security | google/cloud/security/common/data_access/instance_template_dao.py | Python | apache-2.0 | 1,775 |
"""
To be removed.
"""
# from .models import Categoria,SubCategoria,Establecimiento
# from selectable.base import ModelLookup
# from selectable.registry import registry
# class EstablecimientoLookUp(ModelLookup):
# model = Establecimiento
# search_fields = ('nombre__icontains','email', )
# class SubCategoriaLookUp(ModelLookup):
# model = SubCategoria
# search_fields = ('tag__icontains', )
# def get_query(self, request, term):
# results = super(SubCategoriaLookUp, self).get_query(request, term)
# print request.GET
# categoria = request.GET.get('categorias', '')
# print "Categoria: ",categoria
# if categoria:
# results = results.filter(categorias=categoria)
# else:
# results = results.none()
# return results
# class CategoriaLookUp(ModelLookup):
# model = Categoria
# search_fields = ('tag__icontains', )
# registry.register(EstablecimientoLookUp)
# registry.register(SubCategoriaLookUp)
# registry.register(CategoriaLookUp) | camilortte/RecomendadorUD | apps/establishment_system/lookup.py | Python | mit | 1,055 |
from __future__ import absolute_import
import datetime
import json
import pytz
import six
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.shortcuts import redirect
from django.views.decorators.csrf import ensure_csrf_cookie
from eventtracking import tracker as eventtracker
from ipware.ip import get_ip
from edxmako.shortcuts import render_to_response
from track import contexts, shim, tracker
from track.models import TrackingLog
def log_event(event):
"""Capture a event by sending it to the register trackers"""
tracker.send(event)
def _get_request_header(request, header_name, default=''):
"""Helper method to get header values from a request's META dict, if present."""
if request is not None and hasattr(request, 'META') and header_name in request.META:
return request.META[header_name]
else:
return default
def _get_request_ip(request, default=''):
"""Helper method to get IP from a request's META dict, if present."""
if request is not None and hasattr(request, 'META'):
return get_ip(request)
else:
return default
def _get_request_value(request, value_name, default=''):
"""Helper method to get header values from a request's GET/POST dict, if present."""
if request is not None:
if request.method == 'GET':
return request.GET.get(value_name, default)
elif request.method == 'POST':
return request.POST.get(value_name, default)
return default
def _add_user_id_for_username(data):
"""
If data contains a username, adds the corresponding user_id to the data.
In certain use cases, the caller may have the username and not the
user_id. This enables us to standardize on user_id in event data,
even when the caller only has access to the username.
"""
if data and ('username' in data) and ('user_id' not in data):
try:
user = User.objects.get(username=data.get('username'))
data['user_id'] = user.id
except User.DoesNotExist:
pass
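# Illustrative behaviour of the helper above (values are hypothetical):
#
#     data = {'username': 'staff'}
#     _add_user_id_for_username(data)
#     # data is now {'username': 'staff', 'user_id': 4} if such a user exists;
#     # it is left unchanged when the username is missing or unknown.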
def user_track(request):
"""
Log when POST call to "event" URL is made by a user.
GET or POST call should provide "event_type", "event", and "page" arguments.
"""
try:
username = request.user.username
except:
username = "anonymous"
name = _get_request_value(request, 'event_type')
data = _get_request_value(request, 'event', {})
page = _get_request_value(request, 'page')
if isinstance(data, six.string_types) and len(data) > 0:
try:
data = json.loads(data)
_add_user_id_for_username(data)
except ValueError:
pass
context_override = contexts.course_context_from_url(page)
context_override['username'] = username
context_override['event_source'] = 'browser'
context_override['page'] = page
with eventtracker.get_tracker().context('edx.course.browser', context_override):
eventtracker.emit(name=name, data=data)
return HttpResponse('success')
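# Hedged usage sketch: the browser-side JavaScript normally posts to this view;
# an equivalent call from a Django test client might look like the following,
# assuming the view is routed at "/event" as the docstring suggests. The event
# name and payload are made-up examples, not a documented contract.
#
#     from django.test import Client
#     Client().post('/event', {
#         'event_type': 'play_video',
#         'event': '{"id": "video_1", "currentTime": 0}',
#         'page': 'https://example.com/courses/demo/courseware/',
#     })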
def server_track(request, event_type, event, page=None):
"""
Log events related to server requests.
Handle the situation where the request may be NULL, as may happen with management commands.
"""
if event_type.startswith("/event_logs") and request.user.is_staff:
return # don't log
try:
username = request.user.username
except:
username = "anonymous"
# define output:
event = {
"username": username,
"ip": _get_request_ip(request),
"referer": _get_request_header(request, 'HTTP_REFERER'),
"accept_language": _get_request_header(request, 'HTTP_ACCEPT_LANGUAGE'),
"event_source": "server",
"event_type": event_type,
"event": event,
"agent": _get_request_header(request, 'HTTP_USER_AGENT').decode('latin1'),
"page": page,
"time": datetime.datetime.utcnow().replace(tzinfo=pytz.utc),
"host": _get_request_header(request, 'SERVER_NAME'),
"context": eventtracker.get_tracker().resolve_context(),
}
# Some duplicated fields are passed into event-tracking via the context by track.middleware.
# Remove them from the event here since they are captured elsewhere.
shim.remove_shim_context(event)
log_event(event)
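# Hedged usage sketch: a view or middleware could record a server-side event
# like this; the event_type and payload below are purely illustrative.
#
#     server_track(request, 'problem_check', {'problem_id': 'p1', 'success': True},
#                  page=request.path)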
def task_track(request_info, task_info, event_type, event, page=None):
"""
    Logs tracking information for events occurring within celery tasks.
The `event_type` is a string naming the particular event being logged,
while `event` is a dict containing whatever additional contextual information
is desired.
The `request_info` is a dict containing information about the original
task request. Relevant keys are `username`, `ip`, `agent`, and `host`.
While the dict is required, the values in it are not, so that {} can be
passed in.
In addition, a `task_info` dict provides more information about the current
task, to be stored with the `event` dict. This may also be an empty dict.
The `page` parameter is optional, and allows the name of the page to
be provided.
"""
# supplement event information with additional information
# about the task in which it is running.
full_event = dict(event, **task_info)
# All fields must be specified, in case the tracking information is
# also saved to the TrackingLog model. Get values from the task-level
# information, or just add placeholder values.
with eventtracker.get_tracker().context('edx.course.task', contexts.course_context_from_url(page)):
event = {
"username": request_info.get('username', 'unknown'),
"ip": request_info.get('ip', 'unknown'),
"event_source": "task",
"event_type": event_type,
"event": full_event,
"agent": request_info.get('agent', 'unknown'),
"page": page,
"time": datetime.datetime.utcnow().replace(tzinfo=pytz.utc),
"host": request_info.get('host', 'unknown'),
"context": eventtracker.get_tracker().resolve_context(),
}
log_event(event)
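# Hedged usage sketch from inside a celery task; the dict keys follow the
# docstring above, but the concrete values are invented for illustration.
#
#     request_info = {'username': 'staff', 'ip': '10.0.0.1',
#                     'agent': 'celery', 'host': 'worker-1'}
#     task_info = {'task_id': 'abc123'}
#     task_track(request_info, task_info, 'course.grade.calculated',
#                {'graded': 42}, page='courses/course-v1:edX+Demo+2020')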
@login_required
@ensure_csrf_cookie
def view_tracking_log(request, args=''):
"""View to output contents of TrackingLog model. For staff use only."""
if not request.user.is_staff:
return redirect('/')
nlen = 100
username = ''
if args:
for arg in args.split('/'):
if arg.isdigit():
nlen = int(arg)
if arg.startswith('username='):
username = arg[9:]
record_instances = TrackingLog.objects.all().order_by('-time')
if username:
record_instances = record_instances.filter(username=username)
record_instances = record_instances[0:nlen]
# fix dtstamp
fmt = '%a %d-%b-%y %H:%M:%S' # "%Y-%m-%d %H:%M:%S %Z%z"
for rinst in record_instances:
rinst.dtstr = rinst.time.replace(tzinfo=pytz.utc).astimezone(pytz.timezone('US/Eastern')).strftime(fmt)
return render_to_response('tracking_log.html', {'records': record_instances})
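# Hedged example of the args string parsed above: "200/username=staff" would
# show the 200 most recent tracking rows for the user "staff"; either part may
# be omitted, in which case the defaults (100 rows, all users) apply.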
| jolyonb/edx-platform | common/djangoapps/track/views/__init__.py | Python | agpl-3.0 | 7,220 |
import sqlite3
persons = [
("Hugo", "Boss"),
("Calvin", "Klein")
]
con = sqlite3.connect(":memory:")
# Create the table
con.execute("create table person(firstname, lastname)")
# Fill the table
con.executemany("insert into person(firstname, lastname) values (?, ?)", persons)
# Print the table contents
for row in con.execute("select firstname, lastname from person"):
print row
# Use a dummy WHERE clause so SQLite does not take the shortcut for whole-table deletes.
print "I just deleted", con.execute("delete from person where 1=1").rowcount, "rows"
| MicroTrustRepos/microkernel | src/l4/pkg/python/contrib/Doc/includes/sqlite3/shortcut_methods.py | Python | gpl-2.0 | 565 |
class Duck:
def quack(self):
print("Quaaack!")
def walk(self):
print("*waddles*")
def bark(self):
print("The duck can't bark.")
def fur(self):
print("The duck has feathers")
class Dog:
def bark(self):
print("Woof!")
def fur(self):
print("The dog has blonde fur.")
def walk(self):
print("*runs around in circles*")
def quack(self):
print("The dog can't quack.")
def main():
donald = Duck()
truffle = Dog()
in_the_forest(donald)
def in_the_forest(dog):
dog.bark()
dog.fur()
def in_the_pond(duck):
duck.quack()
duck.walk()
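# Duck typing: neither in_the_forest() nor in_the_pond() checks the argument's
# type, only that it responds to the methods being called, so a Duck or a Dog
# works in either function. A hedged extension of main() could also call:
#
#     in_the_pond(truffle)   # "The dog can't quack." then "*runs around in circles*"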
if __name__ == "__main__":
main()
| Safuya/python_3_essential_training | 12 Classes/classes_polymorphism2.py | Python | gpl-3.0 | 696 |
#!/usr/bin/env python
import os
import sys
PROJECT_ROOT = os.path.dirname(os.path.abspath(os.path.dirname(__file__)))
sys.path.insert(0, PROJECT_ROOT)
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.settings")
# if len(sys.argv) > 1 and sys.argv[1] == 'runserver':
# raise ValueError('This Django project is not intended for running a server.')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| genialis/django-rest-framework-reactive | tests/manage.py | Python | apache-2.0 | 505 |