ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M) |
---|---|---|
py
|
1a5aed9c3513d4c56c2dbc30ce340080d835debe
|
# Unwinder commands.
# Copyright 2015 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import re
def validate_regexp(exp, idstring):
try:
return re.compile(exp)
except re.error:
raise SyntaxError("Invalid %s regexp: %s." % (idstring, exp))
def parse_unwinder_command_args(arg):
"""Internal utility to parse unwinder command argv.
Arguments:
arg: The arguments to the command. The format is:
[locus-regexp [name-regexp]]
Returns:
A 2-tuple of compiled regular expressions.
Raises:
SyntaxError: an error processing ARG
"""
argv = gdb.string_to_argv(arg)
argc = len(argv)
if argc > 2:
raise SyntaxError("Too many arguments.")
locus_regexp = ""
name_regexp = ""
if argc >= 1:
locus_regexp = argv[0]
if argc >= 2:
name_regexp = argv[1]
return (validate_regexp(locus_regexp, "locus"),
validate_regexp(name_regexp, "unwinder"))
class InfoUnwinder(gdb.Command):
"""GDB command to list unwinders.
Usage: info unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression matching the location of the
unwinder. If it is omitted, all registered unwinders from all
loci are listed. A locus can be 'global', 'progspace' to list
the unwinders from the current progspace, or a regular expression
matching filenames of objfiles.
NAME-REGEXP is a regular expression to filter unwinder names. If
it is omitted for a specified locus, then all registered unwinders
in the locus are listed.
"""
def __init__(self):
super(InfoUnwinder, self).__init__("info unwinder",
gdb.COMMAND_STACK)
def list_unwinders(self, title, unwinders, name_re):
"""Lists the unwinders whose name matches regexp.
Arguments:
title: The line to print before the list.
unwinders: The list of the unwinders.
name_re: unwinder name filter.
"""
if not unwinders:
return
print(title)
for unwinder in unwinders:
if name_re.match(unwinder.name):
print(" %s%s" % (unwinder.name,
"" if unwinder.enabled else " [disabled]"))
def invoke(self, arg, from_tty):
locus_re, name_re = parse_unwinder_command_args(arg)
if locus_re.match("global"):
self.list_unwinders("Global:", gdb.frame_unwinders,
name_re)
if locus_re.match("progspace"):
cp = gdb.current_progspace()
self.list_unwinders("Progspace %s:" % cp.filename,
cp.frame_unwinders, name_re)
for objfile in gdb.objfiles():
if locus_re.match(objfile.filename):
self.list_unwinders("Objfile %s:" % objfile.filename,
objfile.frame_unwinders, name_re)
def do_enable_unwinder1(unwinders, name_re, flag):
"""Enable/disable unwinders whose names match given regex.
Arguments:
unwinders: The list of unwinders.
name_re: Unwinder name filter.
flag: Enable/disable.
Returns:
The number of unwinders affected.
"""
total = 0
for unwinder in unwinders:
if name_re.match(unwinder.name):
unwinder.enabled = flag
total += 1
return total
def do_enable_unwinder(arg, flag):
"""Enable/disable unwinder(s)."""
(locus_re, name_re) = parse_unwinder_command_args(arg)
total = 0
if locus_re.match("global"):
total += do_enable_unwinder1(gdb.frame_unwinders, name_re, flag)
if locus_re.match("progspace"):
total += do_enable_unwinder1(gdb.current_progspace().frame_unwinders,
name_re, flag)
for objfile in gdb.objfiles():
if locus_re.match(objfile.filename):
total += do_enable_unwinder1(objfile.frame_unwinders, name_re,
flag)
print("%d unwinder%s %s" % (total, "" if total == 1 else "s",
"enabled" if flag else "disabled"))
class EnableUnwinder(gdb.Command):
"""GDB command to enable unwinders.
Usage: enable unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression specifying the unwinders to
enable. It can be 'global', 'progspace', or the name of an objfile
within that progspace.
NAME-REGEXP is a regular expression to filter unwinder names. If
it is omitted for a specified locus, then all registered unwinders
in the locus are affected.
"""
def __init__(self):
super(EnableUnwinder, self).__init__("enable unwinder",
gdb.COMMAND_STACK)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_unwinder(arg, True)
class DisableUnwinder(gdb.Command):
"""GDB command to disable the specified unwinder.
Usage: disable unwinder [locus-regexp [name-regexp]]
LOCUS-REGEXP is a regular expression specifying the unwinders to
disable. It can be 'global', 'progspace', or the name of an objfile
within that progspace.
NAME-REGEXP is a regular expression to filter unwinder names. If
it is omitted for a specified locus, then all registered unwinders
in the locus are affected.
"""
def __init__(self):
super(DisableUnwinder, self).__init__("disable unwinder",
gdb.COMMAND_STACK)
def invoke(self, arg, from_tty):
"""GDB calls this to perform the command."""
do_enable_unwinder(arg, False)
def register_unwinder_commands():
"""Installs the unwinder commands."""
InfoUnwinder()
EnableUnwinder()
DisableUnwinder()
register_unwinder_commands()
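# Editor's note: an illustrative GDB session using the commands registered
# above. The unwinder name "my-unwinder" and the listed output are
# hypothetical; actual output depends on which unwinders are registered.
#
#   (gdb) info unwinder
#   Global:
#     my-unwinder
#   (gdb) disable unwinder global my-unwinder
#   1 unwinder disabled
#   (gdb) enable unwinder global my-unwinder
#   1 unwinder enabled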
|
py
|
1a5aee8ab1c9e64c706fc2171f2a661bdad1c69b
|
from collections import defaultdict
import errno
import math
import mmap
import os
import sys
import time
import multiprocessing as mp
from six.moves import range
import numpy as np
from .lib import Bbox, Vec, mkdir
SHM_DIRECTORY = '/dev/shm/'
EMULATED_SHM_DIRECTORY = '/tmp/cloudvolume-shm'
EMULATE_SHM = not os.path.isdir(SHM_DIRECTORY)
PLATFORM_SHM_DIRECTORY = SHM_DIRECTORY if not EMULATE_SHM else EMULATED_SHM_DIRECTORY
class SharedMemoryReadError(Exception):
pass
class SharedMemoryAllocationError(Exception):
pass
def ndarray(shape, dtype, location, order='F', readonly=False, lock=None, **kwargs):
"""
Create a shared memory numpy array.
The lock is only necessary while doing multiprocessing on
platforms without /dev/shm type shared memory, since filesystem
emulation will be used instead.
Allocating the shared array requires cleanup on your part.
A shared memory file will be located at sharedmemory.PLATFORM_SHM_DIRECTORY + location
and must be unlinked when you're done. It will outlive the program.
You should also call .close() on the mmap file handle when done. However,
this is less of a problem because the operating system will close the
file handle on process termination.
Parameters:
shape: same as numpy.ndarray
dtype: same as numpy.ndarray
location: the shared memory filename
lock: (optional) multiprocessing.Lock
Returns: (mmap filehandle, shared ndarray)
"""
if EMULATE_SHM:
return ndarray_fs(
shape, dtype, location, lock,
readonly, order, emulate_shm=True, **kwargs
)
return ndarray_shm(shape, dtype, location, readonly, order, **kwargs)
def ndarray_fs(
shape, dtype, location, lock,
readonly=False, order='F', emulate_shm=False,
**kwargs
):
"""Emulate shared memory using the filesystem."""
dbytes = np.dtype(dtype).itemsize
nbytes = Vec(*shape).rectVolume() * dbytes
if emulate_shm:
directory = mkdir(EMULATED_SHM_DIRECTORY)
filename = os.path.join(directory, location)
else:
filename = location
if lock:
lock.acquire()
try:
allocate_shm_file(filename, nbytes, dbytes, readonly)
finally:
if lock:
lock.release()
with open(filename, 'r+b') as f:
array_like = mmap.mmap(f.fileno(), 0) # map entire file
renderbuffer = np.ndarray(buffer=array_like, dtype=dtype, shape=shape, order=order, **kwargs)
renderbuffer.setflags(write=(not readonly))
return array_like, renderbuffer
def allocate_shm_file(filename, nbytes, dbytes, readonly):
exists = os.path.exists(filename)
size = 0 if not exists else os.path.getsize(filename)
if readonly and not exists:
raise SharedMemoryReadError(filename + " has not been allocated. Requested " + str(nbytes) + " bytes.")
elif readonly and size != nbytes:
raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(
filename, size, nbytes
))
if exists:
if size > nbytes:
with open(filename, 'wb') as f:
os.ftruncate(f.fileno(), nbytes)
elif size < nbytes:
# too small? just remake it below
os.unlink(filename)
exists = os.path.exists(filename)
if not exists:
# Previously we were writing out real files full of zeros,
# but a) that takes forever and b) modern OSes support sparse
# files (i.e. gigabytes of zeros that take up only a few real bytes).
#
# The following should take advantage of this functionality and be faster.
# It should work on Python 2.7 Unix, and Python 3.5+ on Unix and Windows.
#
# References:
# https://stackoverflow.com/questions/8816059/create-file-of-particular-size-in-python
# https://docs.python.org/3/library/os.html#os.ftruncate
# https://docs.python.org/2/library/os.html#os.ftruncate
#
with open(filename, 'wb') as f:
os.ftruncate(f.fileno(), nbytes)
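# Editor's sketch (illustrative, not part of the original module): the sparse
# allocation described above can be observed directly; the file reports its
# full logical size while using few or no disk blocks on filesystems that
# support sparse files:
#
#   with open('/tmp/sparse-demo', 'wb') as f:
#       os.ftruncate(f.fileno(), 10**9)
#   os.path.getsize('/tmp/sparse-demo')    # 1000000000 (logical size)
#   os.stat('/tmp/sparse-demo').st_blocks  # near zero on POSIX sparse-file filesystems
#   os.unlink('/tmp/sparse-demo')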
def ndarray_shm(shape, dtype, location, readonly=False, order='F', **kwargs):
"""Create a shared memory numpy array. Requires /dev/shm to exist."""
import posix_ipc
from posix_ipc import O_CREAT
import psutil
nbytes = Vec(*shape).rectVolume() * np.dtype(dtype).itemsize
available = psutil.virtual_memory().available
preexisting = 0
# This might only work on Ubuntu
shmloc = os.path.join(SHM_DIRECTORY, location)
if os.path.exists(shmloc):
preexisting = os.path.getsize(shmloc)
elif readonly:
raise SharedMemoryReadError(shmloc + " has not been allocated. Requested " + str(nbytes) + " bytes.")
if readonly and preexisting != nbytes:
raise SharedMemoryReadError("{} exists, but the allocation size ({} bytes) does not match the request ({} bytes).".format(
shmloc, preexisting, nbytes
))
if (nbytes - preexisting) > available:
overallocated = nbytes - preexisting - available
overpercent = (100 * overallocated / (preexisting + available))
raise SharedMemoryAllocationError("""
Requested more memory than is available.
Shared Memory Location: {}
Shape: {}
Requested Bytes: {}
Available Bytes: {}
Preexisting Bytes*: {}
Overallocated Bytes*: {} (+{:.2f}%)
* Preexisting is only correct on linux systems that support /dev/shm/""" \
.format(location, shape, nbytes, available, preexisting, overallocated, overpercent))
# This might seem like we're being "extra safe", but consider a race
# condition in which the state of the shared memory changed between the
# check above and now. Better to make sure that we don't accidentally
# change anything if readonly is set.
flags = 0 if readonly else O_CREAT
size = 0 if readonly else int(nbytes)
try:
shared = posix_ipc.SharedMemory(location, flags=flags, size=size)
array_like = mmap.mmap(shared.fd, shared.size)
os.close(shared.fd)
renderbuffer = np.ndarray(buffer=array_like, dtype=dtype, shape=shape, order=order, **kwargs)
except OSError as err:
if err.errno == errno.ENOMEM: # Out of Memory
posix_ipc.unlink_shared_memory(location)
raise
renderbuffer.setflags(write=(not readonly))
return array_like, renderbuffer
def unlink(location):
if EMULATE_SHM:
return unlink_fs(location)
return unlink_shm(location)
def unlink_shm(location):
import posix_ipc
try:
posix_ipc.unlink_shared_memory(location)
except posix_ipc.ExistentialError:
return False
return True
def unlink_fs(location):
directory = mkdir(EMULATED_SHM_DIRECTORY)
try:
filename = os.path.join(directory, location)
os.unlink(filename)
return True
except OSError:
return False
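# --- Editor's usage sketch (not part of the original module) ---
# A minimal, hypothetical example of the ndarray()/unlink() API documented
# above: allocate a shared buffer, fill it, reopen it read-only, then clean
# up. The location name "example-buffer" is illustrative.
if __name__ == '__main__':
    mm, arr = ndarray(shape=(4, 4), dtype=np.float32, location='example-buffer')
    arr[:] = 1.0                 # visible to any process mapping the same location
    del arr                      # drop the buffer export before closing the mmap
    mm.close()

    ro_mm, ro_arr = ndarray(shape=(4, 4), dtype=np.float32,
                            location='example-buffer', readonly=True)
    assert float(ro_arr.sum()) == 16.0
    del ro_arr
    ro_mm.close()

    unlink('example-buffer')     # the backing file outlives the process unless unlinked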
|
py
|
1a5aef3f0200574347ebc34ae77278db8c827911
|
# pcost.py
import report
def portfolio_cost(filename):
'''
Computes the total cost (shares*price) of a portfolio file
'''
portfolio = report.read_portfolio(filename)
return portfolio.total_cost
def main(args):
if len(args) != 2:
raise SystemExit('Usage: %s portfoliofile' % args[0])
filename = args[1]
print('Total cost:', portfolio_cost(filename))
if __name__ == '__main__':
import sys
main(sys.argv)
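# Editor's note: typical invocation (the portfolio file name is hypothetical):
#   python pcost.py portfolio.csv
# This prints "Total cost:" followed by the sum of shares * price over the
# rows of the given portfolio file.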
|
py
|
1a5aef72aab0e01ecf5e4f26b76e927fcc94f5b3
|
# Copyright 2020 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training script for the listops task."""
import functools
import itertools
import json
import os
import time
from absl import app
from absl import flags
from absl import logging
from flax import jax_utils
from flax import nn
from flax import optim
from flax.metrics import tensorboard
from flax.training import checkpoints
from flax.training import common_utils
import jax
from jax import random
import jax.nn
import jax.numpy as jnp
from lra_benchmarks.listops import input_pipeline
from lra_benchmarks.models.transformer import transformer
from lra_benchmarks.utils import train_utils
from ml_collections import config_flags
import numpy as np
import tensorflow.compat.v2 as tf
FLAGS = flags.FLAGS
config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string(
'model_dir', default=None, help='Directory to store model data.')
flags.DEFINE_string(
'task_name',
default='basic',
help='Name of the task used to load training/test data.')
flags.DEFINE_string(
'data_dir', default=None, help='Directory containing datasets.')
flags.DEFINE_bool(
'test_only', default=False, help='Run the evaluation on the test data.')
def create_model(key, flax_module, input_shape, model_kwargs):
"""Creates and initializes the model."""
@functools.partial(jax.jit, backend='cpu')
def _create_model(key):
module = flax_module.partial(**model_kwargs)
with nn.stochastic(key):
_, initial_params = module.init_by_shape(key,
[(input_shape, jnp.float32)])
model = nn.Model(module, initial_params)
return model
return _create_model(key)
def create_optimizer(model, learning_rate):
optimizer_def = optim.Adam(
learning_rate,
beta1=0.9,
beta2=0.98,
eps=1e-9,
weight_decay=FLAGS.config.weight_decay)
optimizer = optimizer_def.create(model)
return optimizer
def compute_metrics(logits, labels, weights):
"""Compute summary metrics."""
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, labels, num_classes=10, weights=weights)
acc, _ = train_utils.compute_weighted_accuracy(logits, labels, weights)
metrics = {
'loss': loss,
'accuracy': acc,
'denominator': weight_sum,
}
metrics = jax.lax.psum(metrics, 'batch')
return metrics
def train_step(optimizer, batch, learning_rate_fn, dropout_rng=None):
"""Perform a single training step."""
train_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in train_keys]
# We handle PRNG splitting inside the top pmap, rather
# than handling it outside in the training loop - doing the
# latter can add some stalls to the devices.
dropout_rng, new_dropout_rng = random.split(dropout_rng)
def loss_fn(model):
"""Loss function used for training."""
with nn.stochastic(dropout_rng):
logits = model(inputs, train=True)
loss, weight_sum = train_utils.compute_weighted_cross_entropy(
logits, targets, num_classes=10, weights=None)
mean_loss = loss / weight_sum
return mean_loss, logits
step = optimizer.state.step
lr = learning_rate_fn(step)
grad_fn = jax.value_and_grad(loss_fn, has_aux=True)
(_, logits), grad = grad_fn(optimizer.target)
grad = jax.lax.pmean(grad, 'batch')
new_optimizer = optimizer.apply_gradient(grad, learning_rate=lr)
metrics = compute_metrics(logits, targets, None)
metrics['learning_rate'] = lr
return new_optimizer, metrics, new_dropout_rng
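# Editor's illustration (an assumption, not part of the original training
# script): the pattern described in the comment inside train_step above,
# i.e. splitting the dropout PRNG key inside the pmapped step and returning
# the fresh key, shown in a stripped-down, self-contained form. All names
# below are made up.
def _pmap_prng_split_sketch():
    def step(rng, x):
        rng, dropout_rng = random.split(rng)             # split on-device, no host sync
        keep = jax.random.bernoulli(dropout_rng, 0.9, x.shape)
        return rng, jnp.where(keep, x / 0.9, 0.0)        # inverted dropout
    p_step = jax.pmap(step)
    rngs = random.split(random.PRNGKey(0), jax.local_device_count())
    xs = jnp.ones((jax.local_device_count(), 8))
    rngs, xs = p_step(rngs, xs)                          # fresh per-device keys come back
    return rngs, xs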
def eval_step(model, batch):
eval_keys = ['inputs', 'targets']
(inputs, targets) = [batch.get(k, None) for k in eval_keys]
logits = model(inputs, train=False)
return compute_metrics(logits, targets, None)
def tohost(x):
"""Collect batches from all devices to host and flatten batch dimensions."""
n_device, n_batch, *remaining_dims = x.shape
return np.array(x).reshape((n_device * n_batch,) + tuple(remaining_dims))
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
tf.enable_v2_behavior()
config = FLAGS.config
logging.info('===========Config Dict============')
logging.info(config)
batch_size = config.batch_size
learning_rate = config.learning_rate
num_train_steps = config.num_train_steps
num_eval_steps = config.num_eval_steps
eval_freq = config.eval_frequency
random_seed = config.random_seed
model_type = config.model_type
model_kwargs = (
config.model_kwargs.to_dict() if 'model_kwargs' in config else {})
if jax.host_id() == 0:
summary_writer = tensorboard.SummaryWriter(
os.path.join(FLAGS.model_dir, 'summary'))
if batch_size % jax.device_count() > 0:
raise ValueError('Batch size must be divisible by the number of devices')
train_ds, eval_ds, test_ds, encoder = input_pipeline.get_datasets(
n_devices=jax.local_device_count(),
task_name=FLAGS.task_name,
data_dir=FLAGS.data_dir,
batch_size=batch_size,
max_length=config.max_length)
vocab_size = encoder.vocab_size
train_ds = train_ds.repeat()
train_iter = iter(train_ds)
max_length = config.max_length
input_shape = (batch_size, max_length)
model_kwargs.update({
'vocab_size': vocab_size,
'emb_dim': config.emb_dim,
'num_heads': config.num_heads,
'num_layers': config.num_layers,
'qkv_dim': config.qkv_dim,
'mlp_dim': config.mlp_dim,
'max_len': config.max_length,
'classifier': True,
'num_classes': 10
})
if hasattr(config, 'attention_fn'):
model_kwargs['attention_fn'] = config.attention_fn
rng = random.PRNGKey(random_seed)
rng = jax.random.fold_in(rng, jax.host_id())
rng, init_rng = random.split(rng)
# We init the first set of dropout PRNG keys, but update it afterwards inside
# the main pmap'd training update for performance.
dropout_rngs = random.split(rng, jax.local_device_count())
if model_type == 'transformer':
model = create_model(init_rng, transformer.TransformerEncoder, input_shape,
model_kwargs)
else:
raise ValueError('Model type not supported')
optimizer = create_optimizer(model, learning_rate)
del model # Don't keep a copy of the initial model.
start_step = 0
if config.restore_checkpoints or FLAGS.test_only:
# Restore unreplicated optimizer + model state from last checkpoint.
optimizer = checkpoints.restore_checkpoint(FLAGS.model_dir, optimizer)
# Grab last step.
start_step = int(optimizer.state.step)
# Replicate optimizer.
optimizer = jax_utils.replicate(optimizer)
learning_rate_fn = train_utils.create_learning_rate_scheduler(
base_learning_rate=learning_rate)
p_train_step = jax.pmap(
functools.partial(train_step, learning_rate_fn=learning_rate_fn),
axis_name='batch')
p_eval_step = jax.pmap(eval_step, axis_name='batch')
# p_pred_step = jax.pmap(predict_step, axis_name='batch')
def run_eval(eval_ds, num_eval_steps=-1):
eval_metrics = []
eval_iter = iter(eval_ds)
if num_eval_steps == -1:
num_iter = itertools.count()
else:
num_iter = range(num_eval_steps)
for _, eval_batch in zip(num_iter, eval_iter):
# pylint: disable=protected-access
eval_batch = common_utils.shard(
jax.tree_map(lambda x: x._numpy(), eval_batch))
# pylint: enable=protected-access
metrics = p_eval_step(optimizer.target, eval_batch)
eval_metrics.append(metrics)
eval_metrics = common_utils.get_metrics(eval_metrics)
eval_metrics_sums = jax.tree_map(jnp.sum, eval_metrics)
eval_denominator = eval_metrics_sums.pop('denominator')
eval_summary = jax.tree_map(
lambda x: x / eval_denominator, # pylint: disable=cell-var-from-loop
eval_metrics_sums)
# Calculate (clipped) perplexity after averaging log-perplexities:
eval_summary['perplexity'] = jnp.clip(
jnp.exp(eval_summary['loss']), a_max=1.0e4)
return eval_summary
if FLAGS.test_only:
with tf.io.gfile.GFile(
os.path.join(FLAGS.model_dir, 'results.json'), 'w') as f:
test_summary = run_eval(test_ds)
json.dump(jax.tree_map(lambda x: x.tolist(), test_summary), f)
return
metrics_all = []
tick = time.time()
for step, batch in zip(range(start_step, num_train_steps), train_iter):
batch = common_utils.shard(jax.tree_map(lambda x: x._numpy(), batch)) # pylint: disable=protected-access
optimizer, metrics, dropout_rngs = p_train_step(
optimizer, batch, dropout_rng=dropout_rngs)
metrics_all.append(metrics)
logging.info('train in step: %d', step)
# Save a Checkpoint
if ((step % config.checkpoint_freq == 0 and step > 0) or
step == num_train_steps - 1):
if jax.host_id() == 0 and config.save_checkpoints:
# Save unreplicated optimizer + model state.
checkpoints.save_checkpoint(FLAGS.model_dir,
jax_utils.unreplicate(optimizer), step)
# Periodic metric handling.
if step % eval_freq == 0 and step > 0:
metrics_all = common_utils.get_metrics(metrics_all)
lr = metrics_all.pop('learning_rate').mean()
metrics_sums = jax.tree_map(jnp.sum, metrics_all)
denominator = metrics_sums.pop('denominator')
summary = jax.tree_map(lambda x: x / denominator, metrics_sums) # pylint: disable=cell-var-from-loop
summary['learning_rate'] = lr
# Calculate (clipped) perplexity after averaging log-perplexities:
summary['perplexity'] = jnp.clip(jnp.exp(summary['loss']), a_max=1.0e4)
logging.info('train in step: %d, loss: %.4f', step, summary['loss'])
if jax.host_id() == 0:
tock = time.time()
steps_per_sec = eval_freq / (tock - tick)
tick = tock
summary_writer.scalar('steps per second', steps_per_sec, step)
for key, val in summary.items():
summary_writer.scalar(f'train_{key}', val, step)
summary_writer.flush()
# Reset metric accumulation for next evaluation cycle.
metrics_all = []
# Eval Metrics
eval_summary = run_eval(eval_ds, num_eval_steps)
logging.info('eval in step: %d, loss: %.4f, acc: %.4f', step,
eval_summary['loss'], eval_summary['accuracy'])
if jax.host_id() == 0:
for key, val in eval_summary.items():
summary_writer.scalar(f'eval_{key}', val, step)
summary_writer.flush()
if __name__ == '__main__':
app.run(main)
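# Editor's note: an illustrative invocation of this script; the script name,
# config path and directories are hypothetical placeholders:
#   python train.py --config=path/to/listops_config.py \
#       --model_dir=/tmp/listops_model --task_name=basic --data_dir=/tmp/listops_data
# Add --test_only to evaluate a restored checkpoint on the test split instead
# of training.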
|
py
|
1a5af000f5c9ff28f331d5ed7d5b1555e124f1b3
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import subprocess
import tempfile
import testtools
from tempest.test import attr
DEVNULL = open(os.devnull, 'wb')
class TestWrappers(testtools.TestCase):
def setUp(self):
super(TestWrappers, self).setUp()
# Setup test dirs
self.directory = tempfile.mkdtemp(prefix='tempest-unit')
self.test_dir = os.path.join(self.directory, 'tests')
os.mkdir(self.test_dir)
# Setup Test files
self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
self.init_file = os.path.join(self.test_dir, '__init__.py')
self.setup_py = os.path.join(self.directory, 'setup.py')
shutil.copy('tempest/tests/files/testr-conf', self.testr_conf_file)
shutil.copy('tempest/tests/files/passing-tests', self.passing_file)
shutil.copy('tempest/tests/files/failing-tests', self.failing_file)
shutil.copy('setup.py', self.setup_py)
shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
shutil.copy('tempest/tests/files/__init__.py', self.init_file)
@attr(type='smoke')
def test_pretty_tox(self):
# Copy wrapper script and requirements:
pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
shutil.copy('tools/pretty_tox.sh', pretty_tox)
# Change directory, run wrapper and check result
self.addCleanup(os.chdir, os.path.abspath(os.curdir))
os.chdir(self.directory)
# Git init is required for the pbr testr command. pbr requires a git
# version or an sdist to work, so make the test directory a git repo
# too.
subprocess.call(['git', 'init'])
exit_code = subprocess.call('sh pretty_tox.sh tests.passing',
shell=True, stdout=DEVNULL, stderr=DEVNULL)
self.assertEqual(exit_code, 0)
@attr(type='smoke')
def test_pretty_tox_fails(self):
# Copy wrapper script and requirements:
pretty_tox = os.path.join(self.directory, 'pretty_tox.sh')
shutil.copy('tools/pretty_tox.sh', pretty_tox)
# Change directory, run wrapper and check result
self.addCleanup(os.chdir, os.path.abspath(os.curdir))
os.chdir(self.directory)
# Git init is required for the pbr testr command. pbr requires a git
# version or an sdist to work, so make the test directory a git repo
# too.
subprocess.call(['git', 'init'])
exit_code = subprocess.call('sh pretty_tox.sh', shell=True,
stdout=DEVNULL, stderr=DEVNULL)
self.assertEqual(exit_code, 1)
@attr(type='smoke')
def test_pretty_tox_serial(self):
# Copy wrapper script and requirements:
pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
# Change directory, run wrapper and check result
self.addCleanup(os.chdir, os.path.abspath(os.curdir))
os.chdir(self.directory)
exit_code = subprocess.call('sh pretty_tox_serial.sh tests.passing',
shell=True, stdout=DEVNULL, stderr=DEVNULL)
self.assertEqual(exit_code, 0)
@attr(type='smoke')
def test_pretty_tox_serial_fails(self):
# Copy wrapper script and requirements:
pretty_tox = os.path.join(self.directory, 'pretty_tox_serial.sh')
shutil.copy('tools/pretty_tox_serial.sh', pretty_tox)
# Change directory, run wrapper and check result
self.addCleanup(os.chdir, os.path.abspath(os.curdir))
os.chdir(self.directory)
exit_code = subprocess.call('sh pretty_tox_serial.sh', shell=True,
stdout=DEVNULL, stderr=DEVNULL)
self.assertEqual(exit_code, 1)
|
py
|
1a5af153f6840abb4bfbac94780922b7552a45b8
|
import spidev, time
spi = spidev.SpiDev()
spi.open(0,0)
def analog_read(channel):
    # MCP3008 protocol: byte 0 is the start bit, byte 1 carries single-ended
    # mode plus the channel number in its top nibble, and byte 2 just clocks
    # out the result. The 10-bit reading comes back in the low 2 bits of the
    # second response byte and all of the third.
    r = spi.xfer2([1, (8 + channel) << 4, 0])
    adc_out = ((r[1] & 3) << 8) + r[2]
    return adc_out
while True:
    reading = analog_read(0)
    voltage = reading * 3.3 / 1024  # scale the 10-bit reading to a 3.3 V reference
    print("Reading=%d\tVoltage=%f" % (reading, voltage))
    time.sleep(1)
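# Editor's note: with a 3.3 V reference and a 10-bit ADC, a mid-scale reading
# of 512 corresponds to 512 * 3.3 / 1024 = 1.65 V.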
|
py
|
1a5af1985c7e2aac19dec65e1f6ff7ee0016137f
|
"""
Compare two or more phasings
"""
import logging
import math
from collections import defaultdict
from contextlib import ExitStack
import dataclasses
from itertools import chain, permutations
from typing import Set, List, Optional, DefaultDict, Dict
from whatshap.vcf import VcfReader, VcfVariant, VariantTable, PloidyError
from whatshap.core import Genotype, SwitchFlipCalculator
from whatshap.cli import CommandLineError
logger = logging.getLogger(__name__)
COUNT_WIDTH = 9
# fmt: off
def add_arguments(parser):
add = parser.add_argument
add('--sample', metavar='SAMPLE', default=None, help='Name of the sample '
'to process. If not given, use first sample found in VCF.')
add('--names', metavar='NAMES', default=None, help='Comma-separated list '
'of data set names to be used in the report (in same order as VCFs).')
add('--ignore-sample-name', default=False, action='store_true', help='For single '
'sample VCFs, ignore sample name and assume all samples are the same.')
add('--tsv-pairwise', metavar='TSVPAIRWISE', default=None, help='Filename to write '
'comparison results from pair-wise comparison to (tab-separated).')
add('--tsv-multiway', metavar='TSVMULTIWAY', default=None, help='Filename to write '
'comparison results from multiway comparison to (tab-separated). Only for diploid vcfs.')
add('--only-snvs', default=False, action="store_true", help='Only process SNVs '
'and ignore all other variants.')
add('--switch-error-bed', default=None, help='Write BED file with switch error positions '
'to given filename. Only for diploid vcfs.')
add('--plot-blocksizes', default=None, help='Write PDF file with a block length histogram '
'to given filename (requires matplotlib).')
add('--plot-sum-of-blocksizes', default=None, help='Write PDF file with a block length histogram in which the height of each bar corresponds to the sum of lengths.')
add('--longest-block-tsv', default=None, help='Write position-wise agreement of longest '
'joint blocks in each chromosome to tab-separated file. Only for diploid vcfs.')
add('--ploidy', '-p', metavar='PLOIDY', type=int, default=2, help='The ploidy of the sample(s) (default: %(default)s).')
# TODO: what's the best way to request "two or more" VCFs?
add('vcf', nargs='+', metavar='VCF', help='At least two phased VCF files to be compared.')
# fmt: on
def validate(args, parser):
if len(args.vcf) < 2:
parser.error("At least two VCFs need to be given.")
if args.ploidy < 2:
parser.error("Ploidy must be > 1.")
if args.ploidy > 2 and args.tsv_multiway:
parser.error("Option --tsv-multiway can only be used if ploidy=2.")
if args.ploidy > 2 and args.switch_error_bed:
parser.error("Option --switch-error-bed can only be used if ploidy=2.")
if args.ploidy > 2 and args.longest_block_tsv:
parser.error("Option --longest-block-tsv can only be used if ploidy=2.")
class SwitchFlips:
def __init__(self, switches: int = 0, flips: int = 0):
self.switches: int = switches
self.flips: int = flips
def __iadd__(self, other):
self.switches += other.switches
self.flips += other.flips
return self
def __repr__(self):
return "SwitchFlips(switches={}, flips={})".format(self.switches, self.flips)
def __str__(self):
return "{}/{}".format(self.switches, self.flips)
class PhasingErrors:
def __init__(
self,
switches: int = 0,
hamming: int = 0,
switch_flips: Optional[SwitchFlips] = None,
diff_genotypes: int = 0,
):
self.switches = switches
self.hamming = hamming
self.switch_flips = SwitchFlips() if switch_flips is None else switch_flips
self.diff_genotypes = diff_genotypes
def __iadd__(self, other: object) -> "PhasingErrors":
if not isinstance(other, PhasingErrors):
raise TypeError("Can only add to PhasingErrors")
self.switches += other.switches
self.hamming += other.hamming
self.switch_flips += other.switch_flips
self.diff_genotypes += other.diff_genotypes
return self
def __repr__(self):
return "PhasingErrors(switches={}, hamming={}, switch_flips={}, diff_genotypes={})".format(
self.switches, self.hamming, self.switch_flips, self.diff_genotypes
)
def complement(s):
"""
>>> complement('01100')
'10011'
"""
t = {"0": "1", "1": "0"}
return "".join(t[c] for c in s)
def hamming(s0, s1):
"""
>>> hamming('ABCD', 'AXCY')
2
"""
assert len(s0) == len(s1)
return sum(c0 != c1 for c0, c1 in zip(s0, s1))
def switch_encoding(phasing):
"""
>>> switch_encoding('0001011')
'001110'
"""
assert isinstance(phasing, str)
return "".join(("0" if phasing[i - 1] == phasing[i] else "1") for i in range(1, len(phasing)))
def compute_switch_flips(phasing0, phasing1) -> SwitchFlips:
assert len(phasing0) == len(phasing1)
s0 = switch_encoding(phasing0)
s1 = switch_encoding(phasing1)
result = SwitchFlips()
switches_in_a_row = 0
for i, (p0, p1) in enumerate(zip(s0, s1)):
if p0 != p1:
switches_in_a_row += 1
if (i + 1 == len(s0)) or (p0 == p1):
result.flips += switches_in_a_row // 2
result.switches += switches_in_a_row % 2
switches_in_a_row = 0
return result
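# Editor's note (illustrative, not from the original source): a short worked
# example of the switch/flip decomposition computed above. A single isolated
# mismatch between two phasings becomes two adjacent mismatches in switch
# space and is counted as one flip, while a change of phase from some
# position onwards is a single switch:
#
#   >>> compute_switch_flips('000000', '001000')
#   SwitchFlips(switches=0, flips=1)
#   >>> compute_switch_flips('000111', '000000')
#   SwitchFlips(switches=1, flips=0)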
def compute_matching_genotype_pos(phasing0, phasing1):
"""
Computes the positions on which both phasings agree on the genotype.
"""
assert len(phasing0) == len(phasing1)
assert len(phasing0) >= 2
assert len(phasing0[0]) == len(phasing1[0])
assert all(len(phasing0[i]) == len(phasing0[0]) for i in range(1, len(phasing0)))
num_vars = len(phasing0[0])
matching_pos = [
i
for i in range(num_vars)
if Genotype([int(hap[i]) for hap in phasing0])
== Genotype([int(hap[i]) for hap in phasing1])
]
return matching_pos
def compute_switch_errors_poly(phasing0, phasing1, matching_pos=None):
"""
Computes the number of necessary switches to transform phasing 0 into phasing 1 or vice versa.
Positions with non-matching genotypes are omitted.
"""
assert len(phasing0) == len(phasing1)
assert len(phasing0) >= 2
assert len(phasing0[0]) == len(phasing1[0])
assert all(len(phasing0[i]) == len(phasing0[0]) for i in range(1, len(phasing0)))
num_vars = len(phasing0[0])
# If positions with matching genotypes are not precomputed, do it here!
if matching_pos is None:
matching_pos = compute_matching_genotype_pos(phasing0, phasing1)
phasing0_matched = ["".join([hap[i] for i in matching_pos]) for hap in phasing0]
phasing1_matched = ["".join([hap[i] for i in matching_pos]) for hap in phasing1]
# A flip cost larger than any achievable total switch cost forces the solver
# to use switches only, hence the assertion below that no flips remain.
vector_error = compute_switch_flips_poly(
phasing0_matched,
phasing1_matched,
switch_cost=1,
flip_cost=2 * num_vars * len(phasing0) + 1,
)
assert vector_error.flips == 0
return vector_error.switches
def compute_switch_flips_poly(phasing0, phasing1, switch_cost=1, flip_cost=1):
"""
Computes the combined number of switches and flips, which are needed to transform phasing 0 into
phasing 1 or vice versa.
"""
(result, switches_in_column, flips_in_column, poswise_config) = compute_switch_flips_poly_bt(
phasing0, phasing1, switch_cost=switch_cost, flip_cost=flip_cost
)
return result
def compute_switch_flips_poly_bt(
phasing0, phasing1, report_error_positions=False, switch_cost=1, flip_cost=1
):
# Check input
if len(phasing0) != len(phasing1):
logger.error(
"Incompatible phasings. Number of haplotypes is not equal "
f"({len(phasing0)} != {len(phasing1)})."
)
assert len(phasing0) == len(phasing1)
num_pos = len(phasing0[0])
if num_pos == 0:
return SwitchFlips(), None, None, None
ploidy = len(phasing0)
if ploidy == 0:
return SwitchFlips(), None, None, None
for i in range(0, len(phasing1)):
if len(phasing1[i]) != num_pos:
logger.error(
"Inconsistent input for phasing. Haplotypes have different lengths "
f"(len(phasing0[0])={num_pos} != len(phasing1[{i}])={len(phasing1[i])})."
)
assert len(phasing1[i]) == num_pos
if len(phasing0[i]) != num_pos:
logger.error(
"Inconsistent input for phasing. Haplotypes have different lengths "
f"(len(phasing0[0])={num_pos} != len(phasing0[{i}])={len(phasing0[i])})."
)
assert len(phasing0[i]) == num_pos
if ploidy > 6:
logger.warning(
"Computing vector error with more than 6 haplotypes. This may take very long ..."
)
# Compute comparison
calc = SwitchFlipCalculator(ploidy, switch_cost, flip_cost)
result = SwitchFlips()
(
switches,
flips,
switches_in_column,
flips_in_column,
positionwise_config,
) = calc.compute_switch_flips_poly(phasing0, phasing1)
# Aggregate results
result.switches = switches / ploidy
result.flips = flips / ploidy
return result, switches_in_column, flips_in_column, positionwise_config
def poly_num_switches(perm0, perm1):
cost = 0
for i in range(len(perm0)):
if perm0[i] != perm1[i]:
cost += 1
return cost
def compare_block(phasing0, phasing1):
""" Input are two lists of haplotype sequences over {0,1}. """
assert len(phasing0) == len(phasing1)
ploidy = len(phasing0)
minimum_hamming_distance = float("inf")
# compute minimum hamming distance
for permutation in permutations(phasing0):
# compute sum of hamming distances
total_hamming = 0
for i in range(ploidy):
total_hamming += hamming(phasing1[i], permutation[i])
total_hamming /= float(ploidy)
minimum_hamming_distance = min(minimum_hamming_distance, total_hamming)
matching_pos = compute_matching_genotype_pos(phasing0, phasing1)
if ploidy == 2:
# conversion to int is allowed, as there should be no fractional error counts for diploid comparisons
switches = int(hamming(switch_encoding(phasing0[0]), switch_encoding(phasing1[0])))
switch_flips = compute_switch_flips(phasing0[0], phasing1[0])
minimum_hamming_distance = int(minimum_hamming_distance)
else:
switches = compute_switch_errors_poly(phasing0, phasing1, matching_pos)
switch_flips = compute_switch_flips_poly(phasing0, phasing1)
return PhasingErrors(
switches=switches,
hamming=minimum_hamming_distance,
switch_flips=switch_flips,
diff_genotypes=len(phasing0[0]) - len(matching_pos),
)
def fraction2percentstr(nominator, denominator):
if denominator == 0:
return "--"
else:
return "{:.2f}%".format(nominator * 100.0 / denominator)
def safefraction(nominator, denominator):
if denominator == 0:
return float("nan")
else:
return nominator / denominator
def create_bed_records(chromosome, phasing0, phasing1, positions, annotation_string):
"""Determines positions of switch errors between two phasings
and yields one BED record per switch error (encoded as a tuple).
The annotation_string is added to each record."""
assert len(phasing0) == len(phasing1) == len(positions)
switch_encoding0 = switch_encoding(phasing0)
switch_encoding1 = switch_encoding(phasing1)
for i, (sw0, sw1) in enumerate(zip(switch_encoding0, switch_encoding1)):
if sw0 != sw1:
yield (chromosome, positions[i] + 1, positions[i + 1] + 1, annotation_string)
def print_stat(text: str, value=None, value2=None, text_width=37):
"""
Print a line like this:
text: value
"""
text = text.rjust(text_width)
if value is None:
assert value2 is None
print(text)
else:
if value == "-":
value = "-" * COUNT_WIDTH
else:
value = str(value).rjust(COUNT_WIDTH)
if value2 is None:
print(text + ":", value)
else:
print(text + ":", value, str(value2).rjust(COUNT_WIDTH))
def print_errors(errors, phased_pairs):
print_stat("phased pairs of variants assessed", phased_pairs)
print_stat("switch errors", errors.switches)
print_stat("switch error rate", fraction2percentstr(errors.switches, phased_pairs))
print_stat("switch/flip decomposition", errors.switch_flips)
print_stat(
"switch/flip rate",
fraction2percentstr(errors.switch_flips.switches + errors.switch_flips.flips, phased_pairs),
)
@dataclasses.dataclass
class PairwiseComparisonResults:
intersection_blocks: int
covered_variants: int
all_assessed_pairs: int
all_switches: int
all_switch_rate: float
all_switchflips: SwitchFlips
all_switchflip_rate: float
blockwise_hamming: int
blockwise_hamming_rate: int
blockwise_diff_genotypes: int
blockwise_diff_genotypes_rate: int
largestblock_assessed_pairs: int
largestblock_switches: int
largestblock_switch_rate: float
largestblock_switchflips: SwitchFlips
largestblock_switchflip_rate: float
largestblock_hamming: int
largestblock_hamming_rate: float
largestblock_diff_genotypes: int
largestblock_diff_genotypes_rate: float
@dataclasses.dataclass
class BlockStats:
variant_count: int
span: int
def collect_common_variants(
variant_tables: List[VariantTable], sample_names: List[str]
) -> Set[VcfVariant]:
common_variants = None
for variant_table, sample in zip(variant_tables, sample_names):
het_variants = [
v
for v, gt in zip(variant_table.variants, variant_table.genotypes_of(sample))
if not gt.is_homozygous()
]
if common_variants is None:
common_variants = set(het_variants)
else:
common_variants.intersection_update(het_variants)
assert common_variants is not None
return common_variants
def compare(
variant_tables: List[VariantTable],
sample_names: List[str],
dataset_names: List[str],
ploidy: int,
):
"""
Return a PairwiseComparisonResults object (among other values) if
variant_tables has a length of 2; otherwise perform a multiway comparison.
"""
assert len(variant_tables) > 1
common_variants = collect_common_variants(variant_tables, sample_names)
assert common_variants is not None
print_stat("common heterozygous variants", len(common_variants))
print_stat("(restricting to these below)")
phases = []
sorted_variants = sorted(common_variants, key=lambda v: v.position)
for variant_table, sample in zip(variant_tables, sample_names):
p = [
phase
for variant, phase in zip(variant_table.variants, variant_table.phases_of(sample))
if variant in common_variants
]
assert [v for v in variant_table.variants if v in common_variants] == sorted_variants
assert len(p) == len(common_variants)
phases.append(p)
# blocks[variant_table_index][block_id] is a list of indices into common_variants
blocks: List[DefaultDict[int, List[int]]] = [defaultdict(list) for _ in variant_tables]
block_intersection = defaultdict(list)
for variant_index in range(len(common_variants)):
any_none = False
for i in range(len(phases)):
phase = phases[i][variant_index]
if phase is None:
any_none = True
else:
blocks[i][phase.block_id].append(variant_index)
if not any_none:
joint_block_id = tuple(
phase[variant_index].block_id for phase in phases # type: ignore
)
block_intersection[joint_block_id].append(variant_index)
# create statistics on each block in each data set
block_stats = compute_block_stats(blocks, sorted_variants)
for dataset_name, blck in zip(dataset_names, blocks):
print_stat(
"non-singleton blocks in {}".format(dataset_name),
len([b for b in blck.values() if len(b) > 1]),
)
print_stat("--> covered variants", sum(len(b) for b in blck.values() if len(b) > 1))
intersection_block_count = sum(1 for b in block_intersection.values() if len(b) > 1)
intersection_block_variants = sum(len(b) for b in block_intersection.values() if len(b) > 1)
print_stat("non-singleton intersection blocks", intersection_block_count)
print_stat("--> covered variants", intersection_block_variants)
if len(variant_tables) == 2:
(
bed_records,
longest_block_agreement,
longest_block_positions,
pairwise_comparison,
) = compare_pair(
block_intersection,
dataset_names,
intersection_block_count,
intersection_block_variants,
phases,
ploidy,
sorted_variants,
variant_tables,
)
return (
pairwise_comparison,
bed_records,
block_stats,
longest_block_positions,
longest_block_agreement,
None,
)
else:
assert ploidy == 2
multiway_results = compare_multiway(block_intersection, dataset_names, phases)
return None, None, block_stats, None, None, multiway_results
def compare_pair(
block_intersection,
dataset_names,
intersection_block_count,
intersection_block_variants,
phases,
ploidy,
sorted_variants,
variant_tables,
):
longest_block = 0
longest_block_errors = PhasingErrors()
longest_block_positions = []
longest_block_agreement = []
phased_pairs = 0
bed_records = []
total_errors = PhasingErrors()
total_compared_variants = 0
for block in block_intersection.values():
if len(block) < 2:
continue
phasing0 = []
phasing1 = []
for j in range(ploidy):
p0 = "".join(str(phases[0][i].phase[j]) for i in block)
p1 = "".join(str(phases[1][i].phase[j]) for i in block)
phasing0.append(p0)
phasing1.append(p1)
block_positions = [sorted_variants[i].position for i in block]
errors = compare_block(phasing0, phasing1)
# TODO: extend to polyploid
if ploidy == 2:
bed_records.extend(
create_bed_records(
variant_tables[0].chromosome,
phasing0[0],
phasing1[0],
block_positions,
"{}<-->{}".format(*dataset_names),
)
)
total_errors += errors
phased_pairs += len(block) - 1
total_compared_variants += len(block)
if len(block) > longest_block:
longest_block = len(block)
longest_block_errors = errors
longest_block_positions = block_positions
# TODO: extend to polyploid
if ploidy == 2:
if hamming(phasing0[0], phasing1[0]) < hamming(phasing0[0], complement(phasing1[0])):
longest_block_agreement = [
1 * (p0 == p1) for p0, p1 in zip(phasing0[0], phasing1[0])
]
else:
longest_block_agreement = [
1 * (p0 != p1) for p0, p1 in zip(phasing0[0], phasing1[0])
]
longest_block_assessed_pairs = max(longest_block - 1, 0)
print_stat("ALL INTERSECTION BLOCKS", "-")
print_errors(total_errors, phased_pairs)
print_stat("Block-wise Hamming distance", total_errors.hamming)
print_stat(
"Block-wise Hamming distance [%]",
fraction2percentstr(total_errors.hamming, total_compared_variants),
)
print_stat("Different genotypes", total_errors.diff_genotypes)
print_stat(
"Different genotypes [%]",
fraction2percentstr(total_errors.diff_genotypes, total_compared_variants),
)
print_stat("LARGEST INTERSECTION BLOCK", "-")
print_errors(longest_block_errors, longest_block_assessed_pairs)
print_stat("Hamming distance", longest_block_errors.hamming)
print_stat(
"Hamming distance [%]", fraction2percentstr(longest_block_errors.hamming, longest_block)
)
print_stat("Different genotypes", longest_block_errors.diff_genotypes)
print_stat(
"Different genotypes [%]",
fraction2percentstr(longest_block_errors.diff_genotypes, longest_block),
)
pcr = PairwiseComparisonResults(
intersection_blocks=intersection_block_count,
covered_variants=intersection_block_variants,
all_assessed_pairs=phased_pairs,
all_switches=total_errors.switches,
all_switch_rate=safefraction(total_errors.switches, phased_pairs),
all_switchflips=total_errors.switch_flips,
all_switchflip_rate=safefraction(
total_errors.switch_flips.switches + total_errors.switch_flips.flips, phased_pairs
),
blockwise_hamming=total_errors.hamming,
blockwise_hamming_rate=safefraction(total_errors.hamming, total_compared_variants),
blockwise_diff_genotypes=total_errors.diff_genotypes,
blockwise_diff_genotypes_rate=safefraction(
total_errors.diff_genotypes, total_compared_variants
),
largestblock_assessed_pairs=longest_block_assessed_pairs,
largestblock_switches=longest_block_errors.switches,
largestblock_switch_rate=safefraction(
longest_block_errors.switches, longest_block_assessed_pairs
),
largestblock_switchflips=longest_block_errors.switch_flips,
largestblock_switchflip_rate=safefraction(
longest_block_errors.switch_flips.switches + longest_block_errors.switch_flips.flips,
longest_block_assessed_pairs,
),
largestblock_hamming=longest_block_errors.hamming,
largestblock_hamming_rate=safefraction(longest_block_errors.hamming, longest_block),
largestblock_diff_genotypes=longest_block_errors.diff_genotypes,
largestblock_diff_genotypes_rate=safefraction(
longest_block_errors.diff_genotypes, longest_block
),
)
return bed_records, longest_block_agreement, longest_block_positions, pcr
def compare_multiway(block_intersection, dataset_names, phases):
histogram = defaultdict(int)
total_compared = 0
for block in block_intersection.values():
if len(block) < 2:
continue
total_compared += len(block) - 1
phasings = ["".join(str(phases[j][i].phase[0]) for i in block) for j in range(len(phases))]
switch_encodings = [switch_encoding(p) for p in phasings]
for i in range(len(block) - 1):
s = "".join(switch_encodings[j][i] for j in range(len(switch_encodings)))
s = min(s, complement(s))
histogram[s] += 1
print_stat("Compared pairs of variants", total_compared)
bipartitions = list(histogram.keys())
bipartitions.sort()
multiway_results = {} # (dataset_list0, dataset_list1) --> count
for i, s in enumerate(bipartitions):
count = histogram[s]
if i == 0:
assert set(c for c in s) == set("0")
print("ALL AGREE")
elif i == 1:
print("DISAGREEMENT")
left, right = [], []
for name, leftright in zip(dataset_names, s):
if leftright == "0":
left.append(name)
else:
right.append(name)
print_stat(
("{%s} vs. {%s}" % (",".join(left), ",".join(right))),
count,
fraction2percentstr(count, total_compared),
)
multiway_results[(",".join(left), ",".join(right))] = count
return multiway_results
def compute_block_stats(
blocks: List[DefaultDict[int, List[int]]], sorted_variants: List[VcfVariant]
):
block_stats = []
for block in blocks:
l = []
for block_id, variant_indices in block.items():
if len(variant_indices) < 2:
continue
span = (
sorted_variants[variant_indices[-1]].position
- sorted_variants[variant_indices[0]].position
)
l.append(BlockStats(len(variant_indices), span))
block_stats.append(l)
return block_stats
def create_blocksize_histogram(filename, block_stats, names, use_weights=False):
try:
import matplotlib
import numpy
matplotlib.use("pdf")
from matplotlib import pyplot
from matplotlib.backends.backend_pdf import PdfPages
except ImportError:
raise CommandLineError(
"To use option --plot-blocksizes, you need to have numpy and matplotlib installed."
)
assert len(block_stats) == len(names)
color_list = ["#ffa347", "#0064c8", "#b42222", "#22a5b4", "#b47c22", "#6db6ff"]
if len(color_list) < len(block_stats):
color_count = len(block_stats)
color_list = pyplot.cm.Set1([n / color_count for n in range(color_count)])
colors = color_list[: len(block_stats)]
with PdfPages(filename) as pdf:
for what, xlabel in [
(lambda stats: stats.variant_count, "variant count"),
(lambda stats: stats.span, "span [bp]"),
]:
pyplot.figure(figsize=(10, 8))
max_value = max(what(stats) for stats in chain(*block_stats))
common_bins = numpy.logspace(0, math.ceil(math.log10(max_value)), 50)
for l, name, color in zip(block_stats, names, colors):
x = [what(stats) for stats in l]
n, bins, patches = pyplot.hist(
x,
bins=common_bins,
alpha=0.6,
color=color,
label=name,
weights=x if use_weights else None,
)
pyplot.xlabel(xlabel)
pyplot.ylabel("Number of blocks")
pyplot.gca().set_xscale("log")
pyplot.gca().set_yscale("log")
pyplot.grid(True)
pyplot.legend()
pdf.savefig()
pyplot.close()
pyplot.figure(figsize=(10, 8))
common_bins = numpy.logspace(0, math.ceil(math.log10(max_value)), 25)
x = [[what(stats) for stats in l] for l in block_stats]
n, bins, patches = pyplot.hist(
x,
bins=common_bins,
alpha=0.6,
color=colors,
label=names,
weights=x if use_weights else None,
)
pyplot.xlabel(xlabel)
pyplot.ylabel("Number of blocks")
pyplot.gca().set_xscale("log")
pyplot.gca().set_yscale("log")
pyplot.grid(True)
pyplot.legend()
pdf.savefig()
pyplot.close()
def run_compare(
vcf,
ploidy,
names=None,
sample=None,
ignore_sample_name=False,
tsv_pairwise=None,
tsv_multiway=None,
only_snvs=False,
switch_error_bed=None,
plot_blocksizes=None,
plot_sum_of_blocksizes=None,
longest_block_tsv=None,
):
vcf_readers = [VcfReader(f, indels=not only_snvs, phases=True, ploidy=ploidy) for f in vcf]
if names:
dataset_names = names.split(",")
if len(dataset_names) != len(vcf):
raise CommandLineError(
"Number of names given with --names does not equal number of VCFs."
)
else:
dataset_names = ["file{}".format(i) for i in range(len(vcf))]
sample_names = get_sample_names(
vcf_readers, requested_sample=sample, ignore_name=ignore_sample_name
)
with ExitStack() as stack:
tsv_pairwise_file = tsv_multiway_file = longest_block_tsv_file = switch_error_bedfile = None
if tsv_pairwise:
tsv_pairwise_file = stack.enter_context(open(tsv_pairwise, "w"))
if tsv_multiway:
tsv_multiway_file = stack.enter_context(open(tsv_multiway, "w"))
print(
"#sample",
"chromosome",
"dataset_list0",
"dataset_list1",
"count",
sep="\t",
file=tsv_multiway_file,
)
if longest_block_tsv:
longest_block_tsv_file = stack.enter_context(open(longest_block_tsv, "w"))
print(
"#dataset_name0",
"dataset_name1",
"#sample",
"chromosome",
"position",
"phase_agreeing",
sep="\t",
file=longest_block_tsv_file,
)
if tsv_pairwise_file:
fields = [
"#sample",
"chromosome",
"dataset_name0",
"dataset_name1",
"file_name0",
"file_name1",
]
field_names = [f.name for f in dataclasses.fields(PairwiseComparisonResults)]
fields.extend(field_names)
fields.extend(["het_variants0", "only_snvs"])
print(*fields, sep="\t", file=tsv_pairwise_file)
if switch_error_bed:
switch_error_bedfile = stack.enter_context(open(switch_error_bed, "w"))
if len(set(sample_names)) > 1 and ignore_sample_name:
print(
"Comparing phasings for samples:",
", ".join(sample_names),
" (--ignore-sample-names selected)",
)
else:
print("Comparing phasings for sample", sample_names[0])
vcfs = get_variant_tables(vcf_readers, vcf)
chromosomes = get_common_chromosomes(vcfs)
if len(chromosomes) == 0:
raise CommandLineError("No chromosome is contained in all VCFs. Aborting.")
logger.info("Chromosomes present in all VCFs: %s", ", ".join(chromosomes))
print("FILENAMES")
longest_name = max(len(n) for n in dataset_names)
for name, filename in zip(dataset_names, vcf):
print(name.rjust(longest_name + 2), "=", filename)
width = max(longest_name, 15) + 5
all_block_stats = [[] for _ in vcfs]
def add_block_stats(block_stats):
assert len(block_stats) == len(all_block_stats)
for big_list, new_list in zip(all_block_stats, block_stats):
big_list.extend(new_list)
for chromosome in sorted(chromosomes):
print("---------------- Chromosome {} ----------------".format(chromosome))
all_bed_records = []
variant_tables = [vcf[chromosome] for vcf in vcfs]
all_variants_union = set()
all_variants_intersection = None
het_variants_union = set()
het_variants_intersection = None
het_variant_sets = []
het_variants0 = None
print("VARIANT COUNTS (heterozygous / all): ")
for variant_table, name, sample in zip(variant_tables, dataset_names, sample_names):
all_variants_union.update(variant_table.variants)
het_variants = [
v
for v, gt in zip(variant_table.variants, variant_table.genotypes_of(sample))
if not gt.is_homozygous()
]
if het_variants0 is None:
het_variants0 = len(het_variants)
het_variants_union.update(het_variants)
if all_variants_intersection is None:
all_variants_intersection = set(variant_table.variants)
het_variants_intersection = set(het_variants)
else:
all_variants_intersection.intersection_update(variant_table.variants)
het_variants_intersection.intersection_update(het_variants)
het_variant_sets.append(set(het_variants))
print(
"{}:".format(name).rjust(width),
str(len(het_variants)).rjust(COUNT_WIDTH),
"/",
str(len(variant_table.variants)).rjust(COUNT_WIDTH),
)
print(
"UNION:".rjust(width),
str(len(het_variants_union)).rjust(COUNT_WIDTH),
"/",
str(len(all_variants_union)).rjust(COUNT_WIDTH),
)
print(
"INTERSECTION:".rjust(width),
str(len(het_variants_intersection)).rjust(COUNT_WIDTH),
"/",
str(len(all_variants_intersection)).rjust(COUNT_WIDTH),
)
for i in range(len(vcfs)):
for j in range(i + 1, len(vcfs)):
print(
"PAIRWISE COMPARISON: {} <--> {}:".format(
dataset_names[i], dataset_names[j]
)
)
(
results,
bed_records,
block_stats,
longest_block_positions,
longest_block_agreement,
multiway_results,
) = compare(
[variant_tables[i], variant_tables[j]],
[sample_names[i], sample_names[j]],
[dataset_names[i], dataset_names[j]],
ploidy,
)
if len(vcfs) == 2:
add_block_stats(block_stats)
all_bed_records.extend(bed_records)
sample_name = (
f"{sample_names[i]}_{sample_names[j]}"
if ignore_sample_name
else sample_names[i]
)
if tsv_pairwise_file:
fields = [
sample_name,
chromosome,
dataset_names[i],
dataset_names[j],
vcf[i],
vcf[j],
]
fields.extend(dataclasses.astuple(results))
fields.extend([het_variants0, int(only_snvs)])
print(*fields, sep="\t", file=tsv_pairwise_file)
if longest_block_tsv_file:
assert ploidy == 2
assert len(longest_block_positions) == len(longest_block_agreement)
for position, phase_agreeing in zip(
longest_block_positions, longest_block_agreement
):
print(
dataset_names[i],
dataset_names[j],
sample_name,
chromosome,
position,
phase_agreeing,
sep="\t",
file=longest_block_tsv_file,
)
# if requested, write all switch errors found in the current chromosome to the bed file
if switch_error_bedfile:
assert ploidy == 2
all_bed_records.sort()
for record in all_bed_records:
print(*record, sep="\t", file=switch_error_bedfile)
if len(vcfs) > 2:
assert ploidy == 2
print("MULTIWAY COMPARISON OF ALL PHASINGS:")
(
results,
bed_records,
block_stats,
longest_block_positions,
longest_block_agreement,
multiway_results,
) = compare(variant_tables, sample_names, dataset_names, ploidy)
add_block_stats(block_stats)
if tsv_multiway_file:
sample_name = (
"_".join(set(sample_names)) if ignore_sample_name else sample_names[0]
)
for ((dataset_list0, dataset_list1), count) in multiway_results.items():
print(
sample_name,
chromosome,
"{" + dataset_list0 + "}",
"{" + dataset_list1 + "}",
count,
sep="\t",
file=tsv_multiway_file,
)
if plot_blocksizes:
create_blocksize_histogram(plot_blocksizes, all_block_stats, dataset_names)
if plot_sum_of_blocksizes:
create_blocksize_histogram(
plot_sum_of_blocksizes, all_block_stats, dataset_names, use_weights=True
)
def get_common_chromosomes(vcfs: List[Dict[str, VariantTable]]) -> List[str]:
common = None
for chrom_variant_table_map in vcfs:
chromosomes = chrom_variant_table_map.keys()
if common is None:
common = set(chromosomes)
else:
common.intersection_update(chromosomes)
if common is None:
return []
return sorted(common)
def get_variant_tables(
vcf_readers: List[VcfReader], vcf_filenames: List[str]
) -> List[Dict[str, VariantTable]]:
vcfs = []
for reader, filename in zip(vcf_readers, vcf_filenames):
# create dict mapping chromosome names to VariantTables
m = dict()
logger.info("Reading phasing from %r", filename)
try:
for variant_table in reader:
m[variant_table.chromosome] = variant_table
except PloidyError as e:
raise CommandLineError("Provided ploidy is invalid: {}. Aborting.".format(e))
vcfs.append(m)
return vcfs
def get_sample_names(
vcf_readers: List[VcfReader], requested_sample: Optional[str], ignore_name: bool = False
) -> List[str]:
first_samples = []
sample_intersection = None
for vcf_reader in vcf_readers:
if sample_intersection is None:
sample_intersection = set(vcf_reader.samples)
else:
sample_intersection.intersection_update(vcf_reader.samples)
if ignore_name and len(vcf_reader.samples) > 1:
raise CommandLineError(
"File '{file}' contains multiple samples, option --ignore-sample-name not available.".format(
file=vcf_reader.path
)
)
first_samples.append(vcf_reader.samples[0])
assert sample_intersection is not None
if requested_sample:
sample_intersection.intersection_update([requested_sample])
if len(sample_intersection) == 0:
raise CommandLineError(
"Sample {!r} requested on command-line not found in all VCFs".format(
requested_sample
)
)
sample_names = [requested_sample] * len(vcf_readers)
elif ignore_name:
sample_names = first_samples
else:
if len(sample_intersection) == 0:
raise CommandLineError("None of the samples is present in all VCFs")
elif len(sample_intersection) == 1:
sample_names = [list(sample_intersection)[0]] * len(vcf_readers)
else:
raise CommandLineError(
"More than one sample is present in all VCFs, please use"
" --sample to specify which sample to work on."
)
return sample_names
def main(args):
run_compare(**vars(args))
|
py
|
1a5af327ba176b51d4025fde0797de900e2125e0
|
#!/usr/bin/env python
#
# Use the raw transactions API to spend bitcoins received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
# spendfrom.py # Lists available funds
# spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a bitcoind or Bitcoin-Qt running
# on localhost.
#
# Depends on jsonrpc
#
from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json
BASE_FEE=Decimal("0.001")
def check_json_precision():
"""Make sure json library being used does not lose precision converting BTC values"""
n = Decimal("20000000.00000003")
satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
if satoshis != 2000000000000003:
raise RuntimeError("JSON encode/decode loses precision")
def determine_db_dir():
"""Return the default location of the bitcoin data directory"""
if platform.system() == "Darwin":
return os.path.expanduser("~/Library/Application Support/Bitcoin/")
elif platform.system() == "Windows":
return os.path.join(os.environ['APPDATA'], "Bitcoin")
return os.path.expanduser("~/.bitcoin")
def read_bitcoin_config(dbdir):
"""Read the bitcoin.conf file from dbdir, returns dictionary of settings"""
from ConfigParser import SafeConfigParser
class FakeSecHead(object):
def __init__(self, fp):
self.fp = fp
self.sechead = '[all]\n'
def readline(self):
if self.sechead:
try: return self.sechead
finally: self.sechead = None
else:
s = self.fp.readline()
if s.find('#') != -1:
s = s[0:s.find('#')].strip() +"\n"
return s
config_parser = SafeConfigParser()
config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf"))))
return dict(config_parser.items("all"))
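# Illustrative sketch (not part of the original script): given a bitcoin.conf
# containing, e.g.
#   rpcuser=alice
#   rpcpassword=secret   # trailing comments are stripped by FakeSecHead.readline
# read_bitcoin_config() would return {'rpcuser': 'alice', 'rpcpassword': 'secret'}.
# FakeSecHead only exists because ConfigParser requires a section header, so a
# fake "[all]" section is prepended before parsing. The values shown here are
# hypothetical.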
def connect_JSON(config):
"""Connect to a bitcoin JSON-RPC server"""
testnet = config.get('testnet', '0')
testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False
    if 'rpcport' not in config:
config['rpcport'] = 16556 if testnet else 15556
connect = "http://%s:%[email protected]:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
try:
result = ServiceProxy(connect)
# ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
# but also make sure the bitcoind we're talking to is/isn't testnet:
if result.getmininginfo()['testnet'] != testnet:
sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
sys.exit(1)
return result
except:
sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
sys.exit(1)
def unlock_wallet(bitcoind):
info = bitcoind.getinfo()
if 'unlocked_until' not in info:
return True # wallet is not encrypted
t = int(info['unlocked_until'])
if t <= time.time():
try:
passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
bitcoind.walletpassphrase(passphrase, 5)
except:
sys.stderr.write("Wrong passphrase\n")
info = bitcoind.getinfo()
return int(info['unlocked_until']) > time.time()
def list_available(bitcoind):
address_summary = dict()
address_to_account = dict()
for info in bitcoind.listreceivedbyaddress(0):
address_to_account[info["address"]] = info["account"]
unspent = bitcoind.listunspent(0)
for output in unspent:
# listunspent doesn't give addresses, so:
rawtx = bitcoind.getrawtransaction(output['txid'], 1)
vout = rawtx["vout"][output['vout']]
pk = vout["scriptPubKey"]
# This code only deals with ordinary pay-to-bitcoin-address
# or pay-to-script-hash outputs right now; anything exotic is ignored.
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
continue
address = pk["addresses"][0]
if address in address_summary:
address_summary[address]["total"] += vout["value"]
address_summary[address]["outputs"].append(output)
else:
address_summary[address] = {
"total" : vout["value"],
"outputs" : [output],
"account" : address_to_account.get(address, "")
}
return address_summary
def select_coins(needed, inputs):
# Feel free to improve this, this is good enough for my simple needs:
outputs = []
have = Decimal("0.0")
n = 0
while have < needed and n < len(inputs):
outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
have += inputs[n]["amount"]
n += 1
return (outputs, have-needed)
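# Worked toy example (hypothetical values, not part of the original script):
# with needed = Decimal("1.5") and two inputs whose "amount" fields are
# Decimal("1.0") each, select_coins() greedily takes both outputs and returns
# (outputs, Decimal("0.5")) -- the second element is the change that
# create_tx() may send back to the last from-address.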
def create_tx(bitcoind, fromaddresses, toaddress, amount, fee):
all_coins = list_available(bitcoind)
total_available = Decimal("0.0")
needed = amount+fee
potential_inputs = []
for addr in fromaddresses:
if addr not in all_coins:
continue
potential_inputs.extend(all_coins[addr]["outputs"])
total_available += all_coins[addr]["total"]
if total_available < needed:
sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
sys.exit(1)
#
# Note:
# Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
# Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
# Decimals, I'm casting amounts to float before sending them to bitcoind.
#
outputs = { toaddress : float(amount) }
(inputs, change_amount) = select_coins(needed, potential_inputs)
if change_amount > BASE_FEE: # don't bother with zero or tiny change
change_address = fromaddresses[-1]
if change_address in outputs:
outputs[change_address] += float(change_amount)
else:
outputs[change_address] = float(change_amount)
rawtx = bitcoind.createrawtransaction(inputs, outputs)
signed_rawtx = bitcoind.signrawtransaction(rawtx)
if not signed_rawtx["complete"]:
sys.stderr.write("signrawtransaction failed\n")
sys.exit(1)
txdata = signed_rawtx["hex"]
return txdata
def compute_amount_in(bitcoind, txinfo):
result = Decimal("0.0")
for vin in txinfo['vin']:
in_info = bitcoind.getrawtransaction(vin['txid'], 1)
vout = in_info['vout'][vin['vout']]
result = result + vout['value']
return result
def compute_amount_out(txinfo):
result = Decimal("0.0")
for vout in txinfo['vout']:
result = result + vout['value']
return result
def sanity_test_fee(bitcoind, txdata_hex, max_fee):
class FeeError(RuntimeError):
pass
try:
txinfo = bitcoind.decoderawtransaction(txdata_hex)
total_in = compute_amount_in(bitcoind, txinfo)
total_out = compute_amount_out(txinfo)
if total_in-total_out > max_fee:
raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))
        fee = total_in - total_out  # the implicit fee is whatever the outputs don't claim
        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        if kb > 1 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
# Exercise for the reader: compute transaction priority, and
# warn if this is a very-low-priority transaction
except FeeError as err:
sys.stderr.write((str(err)+"\n"))
sys.exit(1)
def main():
import optparse
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--from", dest="fromaddresses", default=None,
help="addresses to get bitcoins from")
parser.add_option("--to", dest="to", default=None,
help="address to get send bitcoins to")
parser.add_option("--amount", dest="amount", default=None,
help="amount to send")
parser.add_option("--fee", dest="fee", default="0.0",
help="fee to include")
parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
help="location of bitcoin.conf file with RPC username/password (default: %default)")
parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
help="Use the test network")
parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
help="Don't broadcast the transaction, just create and print the transaction data")
(options, args) = parser.parse_args()
check_json_precision()
config = read_bitcoin_config(options.datadir)
if options.testnet: config['testnet'] = True
bitcoind = connect_JSON(config)
if options.amount is None:
address_summary = list_available(bitcoind)
for address,info in address_summary.iteritems():
n_transactions = len(info['outputs'])
if n_transactions > 1:
print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
else:
print("%s %.8f %s"%(address, info['total'], info['account']))
else:
fee = Decimal(options.fee)
amount = Decimal(options.amount)
while unlock_wallet(bitcoind) == False:
pass # Keep asking for passphrase until they get it right
txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee)
sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01"))
if options.dry_run:
print(txdata)
else:
txid = bitcoind.sendrawtransaction(txdata)
print(txid)
if __name__ == '__main__':
main()
|
py
|
1a5af345a98d89cf189b13682d0703d7079f4000
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: students.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='students.proto',
package='',
syntax='proto3',
serialized_pb=_b('\n\x0estudents.proto\"\x8d\x01\n\x07Student\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\tstudentId\x18\x02 \x01(\t\x12!\n\x04\x64\x65pt\x18\x03 \x01(\x0e\x32\x13.Student.Department\">\n\nDepartment\x12\x14\n\x10\x43OMPUTER_SCIENCE\x10\x00\x12\x1a\n\x16\x45LECTRICAL_ENGINEERING\x10\x01\"(\n\x0bStudentList\x12\x19\n\x07student\x18\x01 \x03(\x0b\x32\x08.Studentb\x06proto3')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_STUDENT_DEPARTMENT = _descriptor.EnumDescriptor(
name='Department',
full_name='Student.Department',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='COMPUTER_SCIENCE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ELECTRICAL_ENGINEERING', index=1, number=1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=98,
serialized_end=160,
)
_sym_db.RegisterEnumDescriptor(_STUDENT_DEPARTMENT)
_STUDENT = _descriptor.Descriptor(
name='Student',
full_name='Student',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='Student.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='studentId', full_name='Student.studentId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='dept', full_name='Student.dept', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_STUDENT_DEPARTMENT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=19,
serialized_end=160,
)
_STUDENTLIST = _descriptor.Descriptor(
name='StudentList',
full_name='StudentList',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='student', full_name='StudentList.student', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=162,
serialized_end=202,
)
_STUDENT.fields_by_name['dept'].enum_type = _STUDENT_DEPARTMENT
_STUDENT_DEPARTMENT.containing_type = _STUDENT
_STUDENTLIST.fields_by_name['student'].message_type = _STUDENT
DESCRIPTOR.message_types_by_name['Student'] = _STUDENT
DESCRIPTOR.message_types_by_name['StudentList'] = _STUDENTLIST
Student = _reflection.GeneratedProtocolMessageType('Student', (_message.Message,), dict(
DESCRIPTOR = _STUDENT,
__module__ = 'students_pb2'
# @@protoc_insertion_point(class_scope:Student)
))
_sym_db.RegisterMessage(Student)
StudentList = _reflection.GeneratedProtocolMessageType('StudentList', (_message.Message,), dict(
DESCRIPTOR = _STUDENTLIST,
__module__ = 'students_pb2'
# @@protoc_insertion_point(class_scope:StudentList)
))
_sym_db.RegisterMessage(StudentList)
# @@protoc_insertion_point(module_scope)
|
py
|
1a5af3b404106b579b499fbd47bcdf0e79d04ea6
|
import asyncio
import os.path
import time
import sys
import platform
import queue
import traceback
import os
import webbrowser
from decimal import Decimal
from functools import partial, lru_cache
from typing import (NamedTuple, Callable, Optional, TYPE_CHECKING, Union, List, Dict, Any,
Sequence, Iterable)
from PyQt5.QtGui import (QFont, QColor, QCursor, QPixmap, QStandardItem,
QPalette, QIcon, QFontMetrics, QShowEvent)
from PyQt5.QtCore import (Qt, QPersistentModelIndex, QModelIndex, pyqtSignal,
QCoreApplication, QItemSelectionModel, QThread,
QSortFilterProxyModel, QSize, QLocale, QAbstractItemModel)
from PyQt5.QtWidgets import (QPushButton, QLabel, QMessageBox, QHBoxLayout,
QAbstractItemView, QVBoxLayout, QLineEdit,
QStyle, QDialog, QGroupBox, QButtonGroup, QRadioButton,
QFileDialog, QWidget, QToolButton, QTreeView, QPlainTextEdit,
QHeaderView, QApplication, QToolTip, QTreeWidget, QStyledItemDelegate,
QMenu)
from electrum_ltc.i18n import _, languages
from electrum_ltc.util import FileImportFailed, FileExportFailed, make_aiohttp_session, resource_path
from electrum_ltc.invoices import PR_UNPAID, PR_PAID, PR_EXPIRED, PR_INFLIGHT, PR_UNKNOWN, PR_FAILED, PR_ROUTING
if TYPE_CHECKING:
from .main_window import ElectrumWindow
from .installwizard import InstallWizard
if platform.system() == 'Windows':
MONOSPACE_FONT = 'Lucida Console'
elif platform.system() == 'Darwin':
MONOSPACE_FONT = 'Monaco'
else:
MONOSPACE_FONT = 'monospace'
dialogs = []
pr_icons = {
PR_UNKNOWN:"warning.png",
PR_UNPAID:"unpaid.png",
PR_PAID:"confirmed.png",
PR_EXPIRED:"expired.png",
PR_INFLIGHT:"unconfirmed.png",
PR_FAILED:"warning.png",
PR_ROUTING:"unconfirmed.png",
}
# filter tx files in QFileDialog:
TRANSACTION_FILE_EXTENSION_FILTER_ANY = "Transaction (*.txn *.psbt);;All files (*)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX = "Partial Transaction (*.psbt)"
TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX = "Complete Transaction (*.txn)"
TRANSACTION_FILE_EXTENSION_FILTER_SEPARATE = (f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_PARTIAL_TX};;"
f"{TRANSACTION_FILE_EXTENSION_FILTER_ONLY_COMPLETE_TX};;"
f"All files (*)")
class EnterButton(QPushButton):
def __init__(self, text, func):
QPushButton.__init__(self, text)
self.func = func
self.clicked.connect(func)
def keyPressEvent(self, e):
if e.key() in [ Qt.Key_Return, Qt.Key_Enter ]:
self.func()
class ThreadedButton(QPushButton):
def __init__(self, text, task, on_success=None, on_error=None):
QPushButton.__init__(self, text)
self.task = task
self.on_success = on_success
self.on_error = on_error
self.clicked.connect(self.run_task)
def run_task(self):
self.setEnabled(False)
self.thread = TaskThread(self)
self.thread.add(self.task, self.on_success, self.done, self.on_error)
def done(self):
self.setEnabled(True)
self.thread.stop()
class WWLabel(QLabel):
    def __init__(self, text="", parent=None):
QLabel.__init__(self, text, parent)
self.setWordWrap(True)
self.setTextInteractionFlags(Qt.TextSelectableByMouse)
class HelpLabel(QLabel):
def __init__(self, text, help_text):
QLabel.__init__(self, text)
self.help_text = help_text
self.app = QCoreApplication.instance()
self.font = QFont()
def mouseReleaseEvent(self, x):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text)
def enterEvent(self, event):
self.font.setUnderline(True)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
return QLabel.enterEvent(self, event)
def leaveEvent(self, event):
self.font.setUnderline(False)
self.setFont(self.font)
self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
return QLabel.leaveEvent(self, event)
class HelpButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, '?')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(round(2.2 * char_width_in_lineedit()))
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Help'),
text=self.help_text,
rich_text=True)
class InfoButton(QPushButton):
def __init__(self, text):
QPushButton.__init__(self, 'Info')
self.help_text = text
self.setFocusPolicy(Qt.NoFocus)
self.setFixedWidth(6 * char_width_in_lineedit())
self.clicked.connect(self.onclick)
def onclick(self):
custom_message_box(icon=QMessageBox.Information,
parent=self,
title=_('Info'),
text=self.help_text,
rich_text=True)
class Buttons(QHBoxLayout):
def __init__(self, *buttons):
QHBoxLayout.__init__(self)
self.addStretch(1)
for b in buttons:
if b is None:
continue
self.addWidget(b)
class CloseButton(QPushButton):
def __init__(self, dialog):
QPushButton.__init__(self, _("Close"))
self.clicked.connect(dialog.close)
self.setDefault(True)
class CopyButton(QPushButton):
def __init__(self, text_getter, app):
QPushButton.__init__(self, _("Copy"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
class CopyCloseButton(QPushButton):
def __init__(self, text_getter, app, dialog):
QPushButton.__init__(self, _("Copy and Close"))
self.clicked.connect(lambda: app.clipboard().setText(text_getter()))
self.clicked.connect(dialog.close)
self.setDefault(True)
class OkButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("OK"))
self.clicked.connect(dialog.accept)
self.setDefault(True)
class CancelButton(QPushButton):
def __init__(self, dialog, label=None):
QPushButton.__init__(self, label or _("Cancel"))
self.clicked.connect(dialog.reject)
class MessageBoxMixin(object):
def top_level_window_recurse(self, window=None, test_func=None):
window = window or self
classes = (WindowModalDialog, QMessageBox)
if test_func is None:
test_func = lambda x: True
for n, child in enumerate(window.children()):
# Test for visibility as old closed dialogs may not be GC-ed.
            # Only accept children that conform to test_func.
if isinstance(child, classes) and child.isVisible() \
and test_func(child):
return self.top_level_window_recurse(child, test_func=test_func)
return window
def top_level_window(self, test_func=None):
return self.top_level_window_recurse(test_func)
def question(self, msg, parent=None, title=None, icon=None, **kwargs) -> bool:
Yes, No = QMessageBox.Yes, QMessageBox.No
return Yes == self.msg_box(icon=icon or QMessageBox.Question,
parent=parent,
title=title or '',
text=msg,
buttons=Yes|No,
defaultButton=No,
**kwargs)
def show_warning(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
title or _('Warning'), msg, **kwargs)
def show_error(self, msg, parent=None, **kwargs):
return self.msg_box(QMessageBox.Warning, parent,
_('Error'), msg, **kwargs)
def show_critical(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Critical, parent,
title or _('Critical Error'), msg, **kwargs)
def show_message(self, msg, parent=None, title=None, **kwargs):
return self.msg_box(QMessageBox.Information, parent,
title or _('Information'), msg, **kwargs)
def msg_box(self, icon, parent, title, text, *, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
parent = parent or self.top_level_window()
return custom_message_box(icon=icon,
parent=parent,
title=title,
text=text,
buttons=buttons,
defaultButton=defaultButton,
rich_text=rich_text,
checkbox=checkbox)
def custom_message_box(*, icon, parent, title, text, buttons=QMessageBox.Ok,
defaultButton=QMessageBox.NoButton, rich_text=False,
checkbox=None):
if type(icon) is QPixmap:
d = QMessageBox(QMessageBox.Information, title, str(text), buttons, parent)
d.setIconPixmap(icon)
else:
d = QMessageBox(icon, title, str(text), buttons, parent)
d.setWindowModality(Qt.WindowModal)
d.setDefaultButton(defaultButton)
if rich_text:
d.setTextInteractionFlags(Qt.TextSelectableByMouse | Qt.LinksAccessibleByMouse)
# set AutoText instead of RichText
# AutoText lets Qt figure out whether to render as rich text.
# e.g. if text is actually plain text and uses "\n" newlines;
# and we set RichText here, newlines would be swallowed
d.setTextFormat(Qt.AutoText)
else:
d.setTextInteractionFlags(Qt.TextSelectableByMouse)
d.setTextFormat(Qt.PlainText)
if checkbox is not None:
d.setCheckBox(checkbox)
return d.exec_()
class WindowModalDialog(QDialog, MessageBoxMixin):
'''Handy wrapper; window modal dialogs are better for our multi-window
daemon model as other wallet windows can still be accessed.'''
def __init__(self, parent, title=None):
QDialog.__init__(self, parent)
self.setWindowModality(Qt.WindowModal)
if title:
self.setWindowTitle(title)
class WaitingDialog(WindowModalDialog):
'''Shows a please wait dialog whilst running a task. It is not
necessary to maintain a reference to this dialog.'''
def __init__(self, parent: QWidget, message: str, task, on_success=None, on_error=None):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.accepted.connect(self.on_accepted)
self.show()
self.thread = TaskThread(self)
self.thread.finished.connect(self.deleteLater) # see #3956
self.thread.add(task, on_success, self.accept, on_error)
def wait(self):
self.thread.wait()
def on_accepted(self):
self.thread.stop()
def update(self, msg):
print(msg)
self.message_label.setText(msg)
class BlockingWaitingDialog(WindowModalDialog):
"""Shows a waiting dialog whilst running a task.
Should be called from the GUI thread. The GUI thread will be blocked while
the task is running; the point of the dialog is to provide feedback
to the user regarding what is going on.
"""
def __init__(self, parent: QWidget, message: str, task: Callable[[], Any]):
assert parent
if isinstance(parent, MessageBoxMixin):
parent = parent.top_level_window()
WindowModalDialog.__init__(self, parent, _("Please wait"))
self.message_label = QLabel(message)
vbox = QVBoxLayout(self)
vbox.addWidget(self.message_label)
self.show()
QCoreApplication.processEvents()
task()
self.accept()
def line_dialog(parent, title, label, ok_label, default=None):
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(500)
l = QVBoxLayout()
dialog.setLayout(l)
l.addWidget(QLabel(label))
txt = QLineEdit()
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.text()
def text_dialog(parent, title, header_layout, ok_label, default=None, allow_multi=False):
from .qrtextedit import ScanQRTextEdit
dialog = WindowModalDialog(parent, title)
dialog.setMinimumWidth(600)
l = QVBoxLayout()
dialog.setLayout(l)
if isinstance(header_layout, str):
l.addWidget(QLabel(header_layout))
else:
l.addLayout(header_layout)
txt = ScanQRTextEdit(allow_multi=allow_multi)
if default:
txt.setText(default)
l.addWidget(txt)
l.addLayout(Buttons(CancelButton(dialog), OkButton(dialog, ok_label)))
if dialog.exec_():
return txt.toPlainText()
class ChoicesLayout(object):
def __init__(self, msg, choices, on_clicked=None, checked_index=0):
vbox = QVBoxLayout()
if len(msg) > 50:
vbox.addWidget(WWLabel(msg))
msg = ""
gb2 = QGroupBox(msg)
vbox.addWidget(gb2)
vbox2 = QVBoxLayout()
gb2.setLayout(vbox2)
self.group = group = QButtonGroup()
for i,c in enumerate(choices):
button = QRadioButton(gb2)
button.setText(c)
vbox2.addWidget(button)
group.addButton(button)
group.setId(button, i)
if i==checked_index:
button.setChecked(True)
if on_clicked:
group.buttonClicked.connect(partial(on_clicked, self))
self.vbox = vbox
def layout(self):
return self.vbox
def selected_index(self):
return self.group.checkedId()
def address_field(addresses):
hbox = QHBoxLayout()
address_e = QLineEdit()
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
else:
addresses = []
def func():
try:
i = addresses.index(str(address_e.text())) + 1
i = i % len(addresses)
address_e.setText(addresses[i])
except ValueError:
# the user might have changed address_e to an
# address not in the wallet (or to something that isn't an address)
if addresses and len(addresses) > 0:
address_e.setText(addresses[0])
button = QPushButton(_('Address'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(address_e)
return hbox, address_e
def filename_field(parent, config, defaultname, select_msg):
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Format")))
gb = QGroupBox("format", parent)
b1 = QRadioButton(gb)
b1.setText(_("CSV"))
b1.setChecked(True)
b2 = QRadioButton(gb)
b2.setText(_("json"))
vbox.addWidget(b1)
vbox.addWidget(b2)
hbox = QHBoxLayout()
directory = config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, defaultname )
filename_e = QLineEdit()
filename_e.setText(path)
def func():
text = filename_e.text()
_filter = "*.csv" if text.endswith(".csv") else "*.json" if text.endswith(".json") else None
p, __ = QFileDialog.getSaveFileName(None, select_msg, text, _filter)
if p:
filename_e.setText(p)
button = QPushButton(_('File'))
button.clicked.connect(func)
hbox.addWidget(button)
hbox.addWidget(filename_e)
vbox.addLayout(hbox)
def set_csv(v):
text = filename_e.text()
text = text.replace(".json",".csv") if v else text.replace(".csv",".json")
filename_e.setText(text)
b1.clicked.connect(lambda: set_csv(True))
b2.clicked.connect(lambda: set_csv(False))
return vbox, filename_e, b1
class ElectrumItemDelegate(QStyledItemDelegate):
def __init__(self, tv: 'MyTreeView'):
super().__init__(tv)
self.tv = tv
self.opened = None
def on_closeEditor(editor: QLineEdit, hint):
self.opened = None
self.tv.is_editor_open = False
if self.tv._pending_update:
self.tv.update()
def on_commitData(editor: QLineEdit):
new_text = editor.text()
idx = QModelIndex(self.opened)
row, col = idx.row(), idx.column()
_prior_text, user_role = self.tv.get_text_and_userrole_from_coordinate(row, col)
# check that we didn't forget to set UserRole on an editable field
assert user_role is not None, (row, col)
self.tv.on_edited(idx, user_role, new_text)
self.closeEditor.connect(on_closeEditor)
self.commitData.connect(on_commitData)
def createEditor(self, parent, option, idx):
self.opened = QPersistentModelIndex(idx)
self.tv.is_editor_open = True
return super().createEditor(parent, option, idx)
class MyTreeView(QTreeView):
ROLE_CLIPBOARD_DATA = Qt.UserRole + 100
filter_columns: Iterable[int]
def __init__(self, parent: 'ElectrumWindow', create_menu, *,
stretch_column=None, editable_columns=None):
super().__init__(parent)
self.parent = parent
self.config = self.parent.config
self.stretch_column = stretch_column
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(create_menu)
self.setUniformRowHeights(True)
# Control which columns are editable
if editable_columns is not None:
editable_columns = set(editable_columns)
elif stretch_column is not None:
editable_columns = {stretch_column}
else:
            editable_columns = set()
self.editable_columns = editable_columns
self.setItemDelegate(ElectrumItemDelegate(self))
self.current_filter = ""
self.is_editor_open = False
self.setRootIsDecorated(False) # remove left margin
self.toolbar_shown = False
# When figuring out the size of columns, Qt by default looks at
# the first 1000 rows (at least if resize mode is QHeaderView.ResizeToContents).
# This would be REALLY SLOW, and it's not perfect anyway.
# So to speed the UI up considerably, set it to
# only look at as many rows as currently visible.
self.header().setResizeContentsPrecision(0)
self._pending_update = False
self._forced_update = False
def set_editability(self, items):
for idx, i in enumerate(items):
i.setEditable(idx in self.editable_columns)
def selected_in_column(self, column: int):
items = self.selectionModel().selectedIndexes()
return list(x for x in items if x.column() == column)
def current_item_user_role(self, col) -> Any:
idx = self.selectionModel().currentIndex()
idx = idx.sibling(idx.row(), col)
item = self.item_from_index(idx)
if item:
return item.data(Qt.UserRole)
def item_from_index(self, idx: QModelIndex) -> Optional[QStandardItem]:
model = self.model()
if isinstance(model, QSortFilterProxyModel):
idx = model.mapToSource(idx)
return model.sourceModel().itemFromIndex(idx)
else:
return model.itemFromIndex(idx)
def original_model(self) -> QAbstractItemModel:
model = self.model()
if isinstance(model, QSortFilterProxyModel):
return model.sourceModel()
else:
return model
def set_current_idx(self, set_current: QPersistentModelIndex):
if set_current:
assert isinstance(set_current, QPersistentModelIndex)
assert set_current.isValid()
self.selectionModel().select(QModelIndex(set_current), QItemSelectionModel.SelectCurrent)
def update_headers(self, headers: Union[List[str], Dict[int, str]]):
# headers is either a list of column names, or a dict: (col_idx->col_name)
if not isinstance(headers, dict): # convert to dict
headers = dict(enumerate(headers))
col_names = [headers[col_idx] for col_idx in sorted(headers.keys())]
self.original_model().setHorizontalHeaderLabels(col_names)
self.header().setStretchLastSection(False)
for col_idx in headers:
sm = QHeaderView.Stretch if col_idx == self.stretch_column else QHeaderView.ResizeToContents
self.header().setSectionResizeMode(col_idx, sm)
def keyPressEvent(self, event):
if self.itemDelegate().opened:
return
if event.key() in [ Qt.Key_F2, Qt.Key_Return, Qt.Key_Enter ]:
self.on_activated(self.selectionModel().currentIndex())
return
super().keyPressEvent(event)
def on_activated(self, idx):
# on 'enter' we show the menu
pt = self.visualRect(idx).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def edit(self, idx, trigger=QAbstractItemView.AllEditTriggers, event=None):
"""
this is to prevent:
edit: editing failed
from inside qt
"""
return super().edit(idx, trigger, event)
def on_edited(self, idx: QModelIndex, user_role, text):
self.parent.wallet.set_label(user_role, text)
self.parent.history_model.refresh('on_edited in MyTreeView')
self.parent.utxo_list.update()
self.parent.update_completions()
def should_hide(self, row):
"""
        row is an index into self.model(). So if there is a proxy, it is the
        row number in that!
"""
return False
def get_text_and_userrole_from_coordinate(self, row_num, column):
idx = self.model().index(row_num, column)
item = self.item_from_index(idx)
user_role = item.data(Qt.UserRole)
return item.text(), user_role
def hide_row(self, row_num):
"""
row_num is for self.model(). So if there is a proxy, it is the row number
in that!
"""
should_hide = self.should_hide(row_num)
if not self.current_filter and should_hide is None:
# no filters at all, neither date nor search
self.setRowHidden(row_num, QModelIndex(), False)
return
for column in self.filter_columns:
txt, _ = self.get_text_and_userrole_from_coordinate(row_num, column)
txt = txt.lower()
if self.current_filter in txt:
# the filter matched, but the date filter might apply
self.setRowHidden(row_num, QModelIndex(), bool(should_hide))
break
else:
# we did not find the filter in any columns, hide the item
self.setRowHidden(row_num, QModelIndex(), True)
def filter(self, p=None):
if p is not None:
p = p.lower()
self.current_filter = p
self.hide_rows()
def hide_rows(self):
for row in range(self.model().rowCount()):
self.hide_row(row)
def create_toolbar(self, config=None):
hbox = QHBoxLayout()
buttons = self.get_toolbar_buttons()
for b in buttons:
b.setVisible(False)
hbox.addWidget(b)
hide_button = QPushButton('x')
hide_button.setVisible(False)
hide_button.pressed.connect(lambda: self.show_toolbar(False, config))
self.toolbar_buttons = buttons + (hide_button,)
hbox.addStretch()
hbox.addWidget(hide_button)
return hbox
def save_toolbar_state(self, state, config):
pass # implemented in subclasses
def show_toolbar(self, state, config=None):
if state == self.toolbar_shown:
return
self.toolbar_shown = state
if config:
self.save_toolbar_state(state, config)
for b in self.toolbar_buttons:
b.setVisible(state)
if not state:
self.on_hide_toolbar()
def toggle_toolbar(self, config=None):
self.show_toolbar(not self.toolbar_shown, config)
def add_copy_menu(self, menu: QMenu, idx) -> QMenu:
cc = menu.addMenu(_("Copy"))
for column in self.Columns:
column_title = self.original_model().horizontalHeaderItem(column).text()
item_col = self.item_from_index(idx.sibling(idx.row(), column))
clipboard_data = item_col.data(self.ROLE_CLIPBOARD_DATA)
if clipboard_data is None:
clipboard_data = item_col.text().strip()
cc.addAction(column_title,
lambda text=clipboard_data, title=column_title:
self.place_text_on_clipboard(text, title=title))
return cc
def place_text_on_clipboard(self, text: str, *, title: str = None) -> None:
self.parent.do_copy(text, title=title)
def showEvent(self, e: 'QShowEvent'):
super().showEvent(e)
if e.isAccepted() and self._pending_update:
self._forced_update = True
self.update()
self._forced_update = False
def maybe_defer_update(self) -> bool:
"""Returns whether we should defer an update/refresh."""
defer = (not self._forced_update
and (not self.isVisible() or self.is_editor_open))
# side-effect: if we decide to defer update, the state will become stale:
self._pending_update = defer
return defer
class MySortModel(QSortFilterProxyModel):
def __init__(self, parent, *, sort_role):
super().__init__(parent)
self._sort_role = sort_role
def lessThan(self, source_left: QModelIndex, source_right: QModelIndex):
item1 = self.sourceModel().itemFromIndex(source_left)
item2 = self.sourceModel().itemFromIndex(source_right)
data1 = item1.data(self._sort_role)
data2 = item2.data(self._sort_role)
if data1 is not None and data2 is not None:
return data1 < data2
v1 = item1.text()
v2 = item2.text()
try:
return Decimal(v1) < Decimal(v2)
except:
return v1 < v2
class ButtonsWidget(QWidget):
def __init__(self):
super(QWidget, self).__init__()
self.buttons = [] # type: List[QToolButton]
def resizeButtons(self):
frameWidth = self.style().pixelMetric(QStyle.PM_DefaultFrameWidth)
x = self.rect().right() - frameWidth - 10
y = self.rect().bottom() - frameWidth
for button in self.buttons:
sz = button.sizeHint()
x -= sz.width()
button.move(x, y - sz.height())
def addButton(self, icon_name, on_click, tooltip):
button = QToolButton(self)
button.setIcon(read_QIcon(icon_name))
button.setIconSize(QSize(25,25))
button.setCursor(QCursor(Qt.PointingHandCursor))
button.setStyleSheet("QToolButton { border: none; hover {border: 1px} pressed {border: 1px} padding: 0px; }")
button.setVisible(True)
button.setToolTip(tooltip)
button.clicked.connect(on_click)
self.buttons.append(button)
return button
def addCopyButton(self, app):
self.app = app
self.addButton("copy.png", self.on_copy, _("Copy to clipboard"))
def on_copy(self):
self.app.clipboard().setText(self.text())
QToolTip.showText(QCursor.pos(), _("Text copied to clipboard"), self)
class ButtonsLineEdit(QLineEdit, ButtonsWidget):
def __init__(self, text=None):
QLineEdit.__init__(self, text)
self.buttons = []
def resizeEvent(self, e):
o = QLineEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class ButtonsTextEdit(QPlainTextEdit, ButtonsWidget):
def __init__(self, text=None):
QPlainTextEdit.__init__(self, text)
self.setText = self.setPlainText
self.text = self.toPlainText
self.buttons = []
def resizeEvent(self, e):
o = QPlainTextEdit.resizeEvent(self, e)
self.resizeButtons()
return o
class PasswordLineEdit(QLineEdit):
def __init__(self, *args, **kwargs):
QLineEdit.__init__(self, *args, **kwargs)
self.setEchoMode(QLineEdit.Password)
def clear(self):
# Try to actually overwrite the memory.
# This is really just a best-effort thing...
self.setText(len(self.text()) * " ")
super().clear()
class TaskThread(QThread):
'''Thread that runs background tasks. Callbacks are guaranteed
to happen in the context of its parent.'''
class Task(NamedTuple):
task: Callable
cb_success: Optional[Callable]
cb_done: Optional[Callable]
cb_error: Optional[Callable]
doneSig = pyqtSignal(object, object, object)
def __init__(self, parent, on_error=None):
super(TaskThread, self).__init__(parent)
self.on_error = on_error
self.tasks = queue.Queue()
self.doneSig.connect(self.on_done)
self.start()
def add(self, task, on_success=None, on_done=None, on_error=None):
on_error = on_error or self.on_error
self.tasks.put(TaskThread.Task(task, on_success, on_done, on_error))
def run(self):
while True:
task = self.tasks.get() # type: TaskThread.Task
if not task:
break
try:
result = task.task()
self.doneSig.emit(result, task.cb_done, task.cb_success)
except BaseException:
self.doneSig.emit(sys.exc_info(), task.cb_done, task.cb_error)
def on_done(self, result, cb_done, cb_result):
# This runs in the parent's thread.
if cb_done:
cb_done()
if cb_result:
cb_result(result)
def stop(self):
self.tasks.put(None)
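# Minimal usage sketch (illustrative only, not part of the original module):
#   thread = TaskThread(parent_widget)
#   thread.add(lambda: do_slow_work(),            # runs in the worker thread
#              on_success=lambda result: ...,     # runs in the parent's thread
#              on_done=lambda: ...,
#              on_error=lambda exc_info: ...)
#   thread.stop()                                 # enqueues None so run() exits
# 'do_slow_work' and 'parent_widget' are placeholders; callbacks are dispatched
# through doneSig, so they execute in the thread that owns the TaskThread parent.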
class ColorSchemeItem:
def __init__(self, fg_color, bg_color):
self.colors = (fg_color, bg_color)
def _get_color(self, background):
return self.colors[(int(background) + int(ColorScheme.dark_scheme)) % 2]
def as_stylesheet(self, background=False):
css_prefix = "background-" if background else ""
color = self._get_color(background)
return "QWidget {{ {}color:{}; }}".format(css_prefix, color)
def as_color(self, background=False):
color = self._get_color(background)
return QColor(color)
class ColorScheme:
dark_scheme = False
GREEN = ColorSchemeItem("#117c11", "#8af296")
YELLOW = ColorSchemeItem("#897b2a", "#ffff00")
RED = ColorSchemeItem("#7c1111", "#f18c8c")
BLUE = ColorSchemeItem("#123b7c", "#8cb3f2")
DEFAULT = ColorSchemeItem("black", "white")
GRAY = ColorSchemeItem("gray", "gray")
@staticmethod
def has_dark_background(widget):
brightness = sum(widget.palette().color(QPalette.Background).getRgb()[0:3])
return brightness < (255*3/2)
@staticmethod
def update_from_widget(widget, force_dark=False):
if force_dark or ColorScheme.has_dark_background(widget):
ColorScheme.dark_scheme = True
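# Illustrative note (not part of the original module): ColorSchemeItem stores a
# (foreground, background) pair and _get_color() flips between them when the
# dark scheme is active. For example, with the default light scheme,
# ColorScheme.RED.as_stylesheet() yields "QWidget { color:#7c1111; }" and
# ColorScheme.RED.as_stylesheet(True) yields
# "QWidget { background-color:#f18c8c; }".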
class AcceptFileDragDrop:
def __init__(self, file_type=""):
assert isinstance(self, QWidget)
self.setAcceptDrops(True)
self.file_type = file_type
def validateEvent(self, event):
if not event.mimeData().hasUrls():
event.ignore()
return False
for url in event.mimeData().urls():
if not url.toLocalFile().endswith(self.file_type):
event.ignore()
return False
event.accept()
return True
def dragEnterEvent(self, event):
self.validateEvent(event)
def dragMoveEvent(self, event):
if self.validateEvent(event):
event.setDropAction(Qt.CopyAction)
def dropEvent(self, event):
if self.validateEvent(event):
for url in event.mimeData().urls():
self.onFileAdded(url.toLocalFile())
def onFileAdded(self, fn):
raise NotImplementedError()
def import_meta_gui(electrum_window, title, importer, on_success):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getOpenFileName(_("Open {} file").format(title), filter_)
if not filename:
return
try:
importer(filename)
except FileImportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {} were successfully imported").format(title))
on_success()
def export_meta_gui(electrum_window, title, exporter):
filter_ = "JSON (*.json);;All files (*)"
filename = electrum_window.getSaveFileName(_("Select file to save your {}").format(title),
'electrum-ltc_{}.json'.format(title), filter_)
if not filename:
return
try:
exporter(filename)
except FileExportFailed as e:
electrum_window.show_critical(str(e))
else:
electrum_window.show_message(_("Your {0} were exported to '{1}'")
.format(title, str(filename)))
def get_parent_main_window(
widget, *, allow_wizard: bool = False,
) -> Union[None, 'ElectrumWindow', 'InstallWizard']:
"""Returns a reference to the ElectrumWindow this widget belongs to."""
from .main_window import ElectrumWindow
from .transaction_dialog import TxDialog
from .installwizard import InstallWizard
for _ in range(100):
if widget is None:
return None
if isinstance(widget, ElectrumWindow):
return widget
if isinstance(widget, TxDialog):
return widget.main_window
if isinstance(widget, InstallWizard) and allow_wizard:
return widget
widget = widget.parentWidget()
return None
def icon_path(icon_basename):
return resource_path('gui', 'icons', icon_basename)
@lru_cache(maxsize=1000)
def read_QIcon(icon_basename):
return QIcon(icon_path(icon_basename))
def get_default_language():
name = QLocale.system().name()
return name if name in languages else 'en_UK'
def char_width_in_lineedit() -> int:
char_width = QFontMetrics(QLineEdit().font()).averageCharWidth()
# 'averageCharWidth' seems to underestimate on Windows, hence 'max()'
return max(9, char_width)
def webopen(url: str):
if sys.platform == 'linux' and os.environ.get('APPIMAGE'):
# When on Linux webbrowser.open can fail in AppImage because it can't find the correct libdbus.
# We just fork the process and unset LD_LIBRARY_PATH before opening the URL.
# See #5425
if os.fork() == 0:
del os.environ['LD_LIBRARY_PATH']
webbrowser.open(url)
os._exit(0)
else:
webbrowser.open(url)
if __name__ == "__main__":
app = QApplication([])
t = WaitingDialog(None, 'testing ...', lambda: [time.sleep(1)], lambda x: QMessageBox.information(None, 'done', "done"))
t.start()
app.exec_()
|
py
|
1a5af451bd574cd1d23ba75f5426e17f16722fde
|
Arduino = Runtime.createAndStart("arduino","Arduino")
Arduino.connect("COM8")
Arduino.setBoardNano()
def publishPin(pins):
for pin in range(0, len(pins)):
print pins[pin].address, pins[pin].value
Arduino.addListener("publishPinArray","python","publishPin")
Arduino.enablePin(14,2)
Arduino.setAref("INTERNAL")
|
py
|
1a5af45657bb2497f67a675a76dc0e20b2e1858c
|
import m5
from m5.objects import LocalBP
m5.util.addToPath('../')
class LocalBPWrap(LocalBP):
def __init__(self, options=None):
super(LocalBPWrap, self).__init__()
pass
class LocalBPPar(LocalBPWrap):
def __init__(self, options=None):
super(LocalBPPar, self).__init__(options)
if not options or not options.local_predictor_size:
return
self.localPredictorSize = options.local_predictor_size
if not options or not options.local_ctr_bits:
return
self.localCtrBits = options.local_ctr_bits
if not options or not options.btb_entries:
return
self.BTBEntries = options.btb_entries
|
py
|
1a5af4632ca08ace5521df87433e4de3273ac0c2
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('products', '0004_auto_20150820_2156'),
]
operations = [
migrations.AlterModelOptions(
name='product',
options={'ordering': ['-title']},
),
]
|
py
|
1a5af533f6d14efda63db6afbb24b30ce51ea966
|
import argparse
import os
import shutil
import sys
import cv2
def label(img_dir, scale_factor):
    img_extensions = {'jpg', 'jpeg', 'png', 'bmp', 'tif', 'tiff'}
images = sorted([os.path.join(img_dir, f)
for f in os.listdir(img_dir)
if os.path.isfile(os.path.join(img_dir, f)) and
f.lower().split('.')[-1] in img_extensions])
labels = [f for i, f in enumerate(sorted(os.listdir(img_dir)))
if os.path.isdir(os.path.join(img_dir, f))]
if not labels:
raise RuntimeError('No subdirectories found. Please create subdirectories for ' +
'the labels you want to store (e.g. "negative", "positive")')
for imgfile in images:
img = cv2.imread(imgfile)
img_name = os.path.basename(imgfile)
if scale_factor != 1:
            size = (int(img.shape[1]*scale_factor), int(img.shape[0]*scale_factor))  # cv2.resize expects (width, height)
img = cv2.resize(img, size)
print('[{}] Keys:'.format(os.path.basename(imgfile)))
for i, l in enumerate(labels):
print('\t({}): Tag image as "{}"'.format(i+1, l))
print('\t(s): Skip this image')
print('\t(d): Delete this image')
print('\t(ESC/q): Quit the script')
print('')
cv2.namedWindow(img_name)
cv2.imshow(img_name, img)
k = cv2.waitKey()
print('')
        if k == ord('s'):
            # 's' skips the image, matching the key list printed above
            continue
        if ord('1') <= k <= ord('9'):
            label_index = int(chr(k))-1
            if label_index >= len(labels):
                print('Invalid label index "{}", skipping image'.format(label_index))
                continue
            shutil.move(imgfile, os.path.join(img_dir, labels[label_index]))
        if k == ord('d'):
            os.unlink(imgfile)
# Quit upon 'q' or ESC
if k == ord('q') or k == 27:
break
print('')
cv2.destroyAllWindows()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--image-dir', '-d', dest='dir', required=True,
help='Directory that contains the images to be processed ' +
'(supported formats: jpg, png, tiff, bmp)')
parser.add_argument('--scale-factor', '-s', dest='scale', required=False, default=1,
type=float, help='Scale factor to be applied to the images for visualization (default: 1)')
opts, args = parser.parse_known_args(sys.argv[1:])
label(img_dir=opts.dir, scale_factor=opts.scale)
if __name__ == '__main__':
main()
# vim:sw=4:ts=4:et:
|
py
|
1a5af57b9ccf519d2052ec17151d6e023f2f8378
|
import tensorflow as tf
import abc
import logging
LOSS_REGISTRY = {}
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Default margin used by pairwise and absolute margin loss
DEFAULT_MARGIN = 1
# default sampling temperature used by adversarial loss
DEFAULT_ALPHA_ADVERSARIAL = 0.5
# Default margin used by margin based adversarial loss
DEFAULT_MARGIN_ADVERSARIAL = 3
DEFAULT_CLASS_PARAMS = {'require_same_size_pos_neg': True, }
def register_loss(name, external_params=[], class_params=DEFAULT_CLASS_PARAMS):
def populate_class_params():
LOSS_REGISTRY[name].class_params = {}
LOSS_REGISTRY[name].class_params['require_same_size_pos_neg'] = class_params.get('require_same_size_pos_neg',
DEFAULT_CLASS_PARAMS['require_same_size_pos_neg'])
def insert_in_registry(class_handle):
LOSS_REGISTRY[name] = class_handle
class_handle.name = name
LOSS_REGISTRY[name].external_params = external_params
populate_class_params()
return class_handle
return insert_in_registry
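# Illustrative note (not part of the original module): register_loss is a
# decorator factory. Decorating a Loss subclass records it in LOSS_REGISTRY
# under the given name, attaches its external/class params, and returns the
# class unchanged, e.g.
#   loss_cls = LOSS_REGISTRY['pairwise']        # -> PairwiseLoss (defined below)
#   loss = loss_cls(eta=5, loss_params={'margin': 1})
# The registry is what lets callers select a loss by its string name.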
class Loss(abc.ABC):
"""Abstract class for loss function.
"""
name = ""
external_params = []
class_params = {}
def __init__(self, eta, hyperparam_dict, verbose=False):
"""Initialize Loss.
Parameters
----------
eta: int
number of negatives
hyperparam_dict : dict
dictionary of hyperparams.
(Keys are described in the hyperparameters section)
"""
self._loss_parameters = {}
self._dependencies = []
# perform check to see if all the required external hyperparams are passed
try:
self._loss_parameters['eta'] = eta
self._init_hyperparams(hyperparam_dict)
if verbose:
logger.info('\n--------- Loss ---------')
logger.info('Name : {}'.format(self.name))
for key, value in self._loss_parameters.items():
logger.info('{} : {}'.format(key, value))
except KeyError as e:
msg = 'Some of the hyperparams for loss were not passed to the loss function.\n{}'.format(e)
logger.error(msg)
raise Exception(msg)
def get_state(self, param_name):
"""Get the state value.
Parameters
----------
param_name : string
name of the state for which one wants to query the value
Returns
-------
param_value:
the value of the corresponding state
"""
try:
param_value = LOSS_REGISTRY[self.name].class_params.get(param_name)
return param_value
except KeyError as e:
            msg = 'Invalid Key.\n{}'.format(e)
logger.error(msg)
raise Exception(msg)
def _init_hyperparams(self, hyperparam_dict):
""" Initializes the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
"""
msg = 'This function is a placeholder in an abstract class'
logger.error(msg)
        raise NotImplementedError(msg)
def _inputs_check(self, scores_pos, scores_neg):
""" Creates any dependencies that need to be checked before performing loss computations
Parameters
----------
scores_pos : tf.Tensor
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor
A tensor of scores assigned to negative statements.
"""
logger.debug('Creating dependencies before loss computations.')
self._dependencies = []
if LOSS_REGISTRY[self.name].class_params['require_same_size_pos_neg'] and self._loss_parameters['eta'] != 1:
logger.debug('Dependencies found: \n\tRequired same size positive and negative. \n\tEta is not 1.')
self._dependencies.append(tf.Assert(tf.equal(tf.shape(scores_pos)[0], tf.shape(scores_neg)[0]),
[tf.shape(scores_pos)[0], tf.shape(scores_neg)[0]]))
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function. Every inherited class must implement this function.
(All the TF code must go in this function.)
Parameters
----------
scores_pos : tf.Tensor
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
msg = 'This function is a placeholder in an abstract class.'
logger.error(msg)
        raise NotImplementedError(msg)
def apply(self, scores_pos, scores_neg):
""" Interface to external world.
This function does the input checks, preprocesses input and finally applies loss function.
Parameters
----------
scores_pos : tf.Tensor
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
self._inputs_check(scores_pos, scores_neg)
with tf.control_dependencies(self._dependencies):
loss = self._apply(scores_pos, scores_neg)
return loss
@register_loss("pairwise", ['margin'])
class PairwiseLoss(Loss):
"""Pairwise, max-margin loss.
Introduced in :cite:`bordes2013translating`.
.. math::
\mathcal{L}(\Theta) = \sum_{t^+ \in \mathcal{G}}\sum_{t^- \in \mathcal{C}}max(0, [\gamma + f_{model}(t^-;\Theta) - f_{model}(t^+;\Theta)])
where :math:`\gamma` is the margin, :math:`\mathcal{G}` is the set of positives,
:math:`\mathcal{C}` is the set of corruptions, :math:`f_{model}(t;\Theta)` is the model-specific scoring function.
"""
def __init__(self, eta, loss_params={'margin': DEFAULT_MARGIN}, verbose=False):
"""Initialize Loss.
Parameters
----------
eta: int
number of negatives
loss_params : dict
Dictionary of loss-specific hyperparams:
- **'margin'**: (float). Margin to be used in pairwise loss computation (default: 1)
Example: ``loss_params={'margin': 1}``
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Verifies and stores the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
- **margin** - Margin to be used in pairwise loss computation(default:1)
"""
self._loss_parameters['margin'] = hyperparam_dict.get('margin', DEFAULT_MARGIN)
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
margin = tf.constant(self._loss_parameters['margin'], dtype=tf.float32, name='margin')
loss = tf.reduce_sum(tf.maximum(margin - scores_pos + scores_neg, 0))
return loss
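# Worked toy example (illustrative, not part of the original module): with
# margin gamma = 1, scores_pos = [2.0, 0.5] and scores_neg = [1.5, 1.0], the
# per-pair hinge terms are max(0, 1 - 2.0 + 1.5) = 0.5 and
# max(0, 1 - 0.5 + 1.0) = 1.5, so _apply() returns their sum, 2.0.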
@register_loss("nll")
class NLLLoss(Loss):
"""Negative log-likelihood loss.
As described in :cite:`trouillon2016complex`.
.. math::
\mathcal{L}(\Theta) = \sum_{t \in \mathcal{G} \cup \mathcal{C}}log(1 + exp(-y \, f_{model}(t;\Theta)))
where :math:`y \in {-1, 1}` is the label of the statement, :math:`\mathcal{G}` is the set of positives,
:math:`\mathcal{C}` is the set of corruptions, :math:`f_{model}(t;\Theta)` is the model-specific scoring function.
"""
def __init__(self, eta, loss_params={}, verbose=False):
"""Initialize Loss.
Parameters
----------
eta: int
number of negatives
loss_params : dict
dictionary of hyperparams. No hyperparameters are required for this loss.
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Initializes the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
"""
return
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
scores = tf.concat([-scores_pos, scores_neg], 0)
return tf.reduce_sum(tf.log(1 + tf.exp(scores)))
@register_loss("absolute_margin", ['margin'])
class AbsoluteMarginLoss(Loss):
"""Absolute margin , max-margin loss.
Introduced in :cite:`Hamaguchi2017`.
.. math::
\mathcal{L}(\Theta) = \sum_{t^+ \in \mathcal{G}}\sum_{t^- \in \mathcal{C}} f_{model}(t^-;\Theta) - max(0, [\gamma - f_{model}(t^+;\Theta)])
where :math:`\gamma` is the margin, :math:`\mathcal{G}` is the set of positives,
:math:`\mathcal{C}` is the set of corruptions, :math:`f_{model}(t;\Theta)` is the model-specific scoring function.
"""
def __init__(self, eta, loss_params={'margin': DEFAULT_MARGIN}, verbose=False):
"""Initialize Loss
Parameters
----------
eta: int
number of negatives
loss_params : dict
Dictionary of loss-specific hyperparams:
- **'margin'**: float. Margin to be used in pairwise loss computation (default:1)
Example: ``loss_params={'margin': 1}``
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Initializes the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dict
Consists of key value pairs. The Loss will check the keys to get the corresponding params.
**margin** - Margin to be used in loss computation(default:1)
Returns
-------
"""
self._loss_parameters['margin'] = hyperparam_dict.get('margin', DEFAULT_MARGIN)
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
margin = tf.constant(self._loss_parameters['margin'], dtype=tf.float32, name='margin')
loss = tf.reduce_sum(tf.maximum(margin + scores_neg, 0) - scores_pos)
return loss
@register_loss("self_adversarial", ['margin', 'alpha'], {'require_same_size_pos_neg': False})
class SelfAdversarialLoss(Loss):
""" Self adversarial sampling loss.
Introduced in :cite:`sun2018rotate`.
.. math::
\mathcal{L} = -log\, \sigma(\gamma + f_{model} (\mathbf{s},\mathbf{o})) - \sum_{i=1}^{n} p(h_{i}^{'}, r, t_{i}^{'} ) \ log \ \sigma(-f_{model}(\mathbf{s}_{i}^{'},\mathbf{o}_{i}^{'}) - \gamma)
where :math:`\mathbf{s}, \mathbf{o} \in \mathcal{R}^k` are the embeddings of the subject
and object of a triple :math:`t=(s,r,o)`, :math:`\gamma` is the margin, :math:`\sigma` the sigmoid function,
and :math:`p(s_{i}^{'}, r, o_{i}^{'} )` is the negatives sampling distribution which is defined as:
.. math::
p(s'_j, r, o'_j | \{(s_i, r_i, o_i)\}) = \\frac{\exp \\alpha \, f_{model}(\mathbf{s'_j}, \mathbf{o'_j})}{\sum_i \exp \\alpha \, f_{model}(\mathbf{s'_i}, \mathbf{o'_i})}
where :math:`\\alpha` is the temperature of sampling, :math:`f_{model}` is the scoring function of
the desired embeddings model.
"""
def __init__(self, eta, loss_params={'margin': DEFAULT_MARGIN_ADVERSARIAL,
'alpha': DEFAULT_ALPHA_ADVERSARIAL}, verbose=False):
"""Initialize Loss
Parameters
----------
eta: int
number of negatives
loss_params : dict
Dictionary of loss-specific hyperparams:
- **'margin'**: (float). Margin to be used for loss computation (default: 1)
- **'alpha'** : (float). Temperature of sampling (default:0.5)
Example: ``loss_params={'margin': 1, 'alpha': 0.5}``
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Initializes the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
- **margin** - Margin to be used in adversarial loss computation (default:3)
- **alpha** - Temperature of sampling (default:0.5)
"""
self._loss_parameters['margin'] = hyperparam_dict.get('margin', DEFAULT_MARGIN_ADVERSARIAL)
self._loss_parameters['alpha'] = hyperparam_dict.get('alpha', DEFAULT_ALPHA_ADVERSARIAL)
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n*negative_count, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : tf.Tensor
The loss value that must be minimized.
"""
margin = tf.constant(self._loss_parameters['margin'], dtype=tf.float32, name='margin')
alpha = tf.constant(self._loss_parameters['alpha'], dtype=tf.float32, name='alpha')
# Compute p(neg_samples) based on eq 4
scores_neg_reshaped = tf.reshape(scores_neg, [self._loss_parameters['eta'], tf.shape(scores_pos)[0]])
p_neg = tf.nn.softmax(alpha * scores_neg_reshaped, axis=0)
# Compute loss based on eq 5
loss = tf.reduce_sum(-tf.log(tf.nn.sigmoid(margin - tf.negative(scores_pos)))) - \
tf.reduce_sum(tf.multiply(p_neg,
tf.log(tf.nn.sigmoid(tf.negative(scores_neg_reshaped) - margin))))
return loss
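# Note (added): the reshape above assumes scores_neg is ordered in eta contiguous blocks of n
# (one block per corruption round), so that each column of the [eta, n] matrix holds the eta
# corruption scores of one positive; the softmax over axis 0 then gives the self-adversarial
# weights p(neg) of eq 4.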
@register_loss("multiclass_nll", [], {'require_same_size_pos_neg': False})
class NLLMulticlass(Loss):
""" Multiclass NLL Loss.
Introduced in :cite:`chen2015`, where both the subject and the object are corrupted (to use it in this way pass
corrupt_sides = ['s', 'o'] to embedding_model_params).
This loss was re-engineered in :cite:`kadlecBK17` where only the object was corrupted to get improved
performance (to use it in this way pass corrupt_sides = 'o' to embedding_model_params).
.. math::
\mathcal{L}(X) = -\sum_{x_{e_1,e_2,r_k} \in X} log\,p(e_2|e_1,r_k) -\sum_{x_{e_1,e_2,r_k} \in X} log\,p(e_1|r_k, e_2)
Examples
--------
>>> from ampligraph.latent_features import TransE
>>> model = TransE(batches_count=1, seed=555, epochs=20, k=10,
...                embedding_model_params={'corrupt_sides':['s', 'o']},
...                loss='multiclass_nll', loss_params={})
"""
def __init__(self, eta, loss_params={}, verbose=False):
"""Initialize Loss
Parameters
----------
eta: int
number of negatives
loss_params : dict
Dictionary of loss-specific hyperparams:
"""
super().__init__(eta, loss_params, verbose)
def _init_hyperparams(self, hyperparam_dict):
""" Verifies and stores the hyperparameters needed by the algorithm.
Parameters
----------
hyperparam_dict : dictionary
Consists of key value pairs. The Loss will check the keys to get the corresponding params
"""
pass
def _apply(self, scores_pos, scores_neg):
""" Apply the loss function.
Parameters
----------
scores_pos : tf.Tensor, shape [n, 1]
A tensor of scores assigned to positive statements.
scores_neg : tf.Tensor, shape [n*negative_count, 1]
A tensor of scores assigned to negative statements.
Returns
-------
loss : float
The loss value that must be minimized.
"""
scores_neg_reshaped = tf.reshape(scores_neg, [self._loss_parameters['eta'], tf.shape(scores_pos)[0]])
neg_exp = tf.exp(scores_neg_reshaped)
pos_exp = tf.exp(scores_pos)
softmax_score = pos_exp/(tf.reduce_sum(neg_exp, axis = 0) + pos_exp)
loss = -tf.reduce_sum(tf.log(softmax_score))
return loss
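# Note (added): with one positive score s_p and its eta negative scores s_n1..s_n_eta,
# softmax_score = exp(s_p) / (exp(s_p) + sum_k exp(s_nk)), so the loss is the categorical
# cross-entropy that treats the true triple as the correct class among its corruptions.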
|
py
|
1a5af5f06a47096cc2d4c34aeb596f26fbba11ce
|
import avroconvert as avc
from multiprocessing import cpu_count
import concurrent.futures
class Execute:
def __init__(self, source: str, bucket: str, dst_format: str, outfolder: str, prefix: str = '', **kwargs):
'''
A wrapper class to run the avro convert operation. This class
calls the reader methods (gcs, s3 or local) and avro converter
methods internally.
:param source: Name of the source file system. Should be one
of these: gs, s3 or fs.
gs is for google cloud bucket
s3 is for amazon s3 bucket
fs is for local file system
:type source: str
:param bucket: Name of the bucket to read the files. For local
file system, bucket would be the folder name from where
the data will be read and converted to specified
output format
:type bucket: str
:param dst_format: Target output format. The files read from
different sources will be converted to the
format specified by this parameter. Its
value should be one of these:
csv, parquet or json; defaults to parquet
:type dst_format: str
:param outfolder: Output folder. This is where the files
converted from avro to csv, parquet or json
will be stored
:type outfolder: str
:param prefix: File prefix. If given, files whose names start with
the given prefix will be read and all the other
files will be omitted
:type prefix: str
:key auth_file: Pass this parameter only when the source is `gs`.
It specifies the location of service account json
file to access google cloud storage. If google
cloud is authenticated or the environment
variable GOOGLE_APPLICATION_CREDENTIALS is already
set in the environment, then this parameter is not
required
:key access_key: Pass this parameter only when the source is `s3`.
It specifies AWS access key id. If aws is already
authenticated or there exists a file ~/.aws/credentials
or the environment variable AWS_ACCESS_KEY_ID is set,
then this parameter is not required
:key secret_key: Pass this parameter only when the source is `s3`.
It specifies AWS secret key. If aws is already
authenticated or there exists a file ~/.aws/credentials
or the environment variable AWS_SECRET_ACCESS_KEY is set,
then this parameter is not required
:key session_token: Pass this parameter only when the source is `s3`.
It specifies AWS session token.
'''
_src = ['s3', 'gs', 'fs']
_dst_format = ['parquet', 'csv', 'json']
source = source.lower()
if not dst_format:
raise AttributeError(f'Output format not specified, should be one of {_dst_format}')
if not outfolder:
raise AttributeError(f'Please specify an output folder')
dst_format = dst_format.lower()
if source not in _src:
raise Exception(
f'Invalid source {source} passed. Source should be one of {_src}')
if dst_format not in _dst_format:
raise Exception(
f'Invalid format {dst_format}. It should be one of {_dst_format}')
if not bucket:
raise Exception(
f'Please specify a bucket')
self.source = source
self.bucket = bucket
self.prefix = prefix
self.dst_format = dst_format
self.outfolder = outfolder
self.params = kwargs
def _resolve(self):
'''
This method returns a reader instance depending upon
the source specified. If the source is `gs`, this
method will return `gs_reader`; if the source is `s3`,
this method will return `s3_reader`; if the source is
`fs`, this method will return `fs_reader` object
'''
reader_function = getattr(avc, f'{self.source}_reader')
reader = reader_function(
bucket=self.bucket, prefix=self.prefix, **self.params)
return reader
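# Dispatch sketch (added): for source='fs' this resolves to avc.fs_reader (per the reader
# naming convention described in the docstring) and calls it as
# avc.fs_reader(bucket=..., prefix=..., **kwargs).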
def run(self) -> bool:
'''
Executor method for the AvroConverter class. This method
parallelizes the execution for all the file read->convert->write operations.
'''
raw_content = self._resolve().get_data()
if not raw_content:
return False
num_process = cpu_count()*2
avro_object = avc.AvroConvert(
dst_format=self.dst_format, outfolder=self.outfolder)
with concurrent.futures.ProcessPoolExecutor(max_workers=int(num_process)) as executor:
results = [executor.submit(
avro_object.convert_avro, **{'filename': filename, 'data': avrodata}) for filename, avrodata in raw_content.items()]
return True
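# Usage sketch (added; illustrative paths, not from the original module):
#   Execute(source='fs', bucket='/data/avro', dst_format='parquet',
#           outfolder='/data/out', prefix='events_').run()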
|
py
|
1a5af81c32896397491604609604c9335bee7695
|
import logging
import os
import subprocess
logging.basicConfig()
logger = logging.getLogger("kalliope")
MPLAYER_EXEC_PATH = "/usr/bin/mplayer"
class Mplayer(object):
"""
This class represents the MPlayer object used to play all the sounds of the system.
"""
def __init__(self):
pass
@classmethod
def play(cls, filepath):
"""
Play the sound located in the provided filepath
:param filepath: The file path of the sound to play
:type filepath: str
:Example:
Mplayer.play(self.file_path)
.. seealso:: TTS
.. raises::
.. warnings:: Class Method and Public
"""
mplayer_exec_path = [MPLAYER_EXEC_PATH]
mplayer_options = ['-slave', '-quiet']
mplayer_command = list()
mplayer_command.extend(mplayer_exec_path)
mplayer_command.extend(mplayer_options)
mplayer_command.append(filepath)
logger.debug("Mplayer cmd: %s" % str(mplayer_command))
fnull = open(os.devnull, 'w')
subprocess.call(mplayer_command, stdout=fnull, stderr=fnull)
|
py
|
1a5af8cddc78a87019bac30599115a84d8e394a4
|
"""
Classes involved in doctesting
This module controls the various classes involved in doctesting.
AUTHORS:
- David Roe (2012-03-27) -- initial version, based on Robert Bradshaw's code.
"""
#*****************************************************************************
# Copyright (C) 2012 David Roe <[email protected]>
# Robert Bradshaw <[email protected]>
# William Stein <[email protected]>
#
# Distributed under the terms of the GNU General Public License (GPL)
# as published by the Free Software Foundation; either version 2 of
# the License, or (at your option) any later version.
# http://www.gnu.org/licenses/
#*****************************************************************************
import random, os, sys, time, json, re, types
import sage.misc.flatten
from sage.structure.sage_object import SageObject
from sage.env import DOT_SAGE, SAGE_LIB, SAGE_SRC
from sage.ext.c_lib import AlarmInterrupt, _init_csage
from sources import FileDocTestSource, DictAsObject
from forker import DocTestDispatcher
from reporting import DocTestReporter
from util import NestedName, Timer, count_noun, dict_difference
nodoctest_regex = re.compile(r'\s*(#+|%+|r"+|"+|\.\.)\s*nodoctest')
optionaltag_regex = re.compile(r'^\w+$')
class DocTestDefaults(SageObject):
"""
This class is used for doctesting the Sage doctest module.
It fills in attributes to be the same as the defaults defined in
``SAGE_LOCAL/bin/sage-runtests``, except in a few places,
which is mostly to make doctesting more predictable.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults
sage: D = DocTestDefaults()
sage: D
DocTestDefaults()
sage: D.timeout
-1
Keyword arguments become attributes::
sage: D = DocTestDefaults(timeout=100)
sage: D
DocTestDefaults(timeout=100)
sage: D.timeout
100
"""
def __init__(self, **kwds):
"""
Edit these parameters after creating an instance.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults
sage: D = DocTestDefaults(); D.optional
set(['sage'])
"""
self.nthreads = 1
self.serial = False
self.timeout = -1
self.all = False
self.logfile = None
self.sagenb = False
self.long = False
self.warn_long = None
self.optional = set(["sage"])
self.randorder = None
self.global_iterations = 1 # sage-runtests default is 0
self.file_iterations = 1 # sage-runtests default is 0
self.initial = False
self.force_lib = False
self.abspath = True # sage-runtests default is False
self.verbose = False
self.debug = False
self.gdb = False
self.valgrind = False
self.massif = False
self.cachegrind = False
self.omega = False
self.failed = False
self.new = False
self.show_skipped = False
# We don't want to use the real stats file by default so that
# we don't overwrite timings for the actual running doctests.
self.stats_path = os.path.join(DOT_SAGE, "timings_dt_test.json")
self.__dict__.update(kwds)
def _repr_(self):
"""
Return the print representation.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults
sage: DocTestDefaults(timeout=100, foobar="hello")
DocTestDefaults(foobar='hello', timeout=100)
"""
s = "DocTestDefaults("
for k in sorted(dict_difference(self.__dict__, DocTestDefaults().__dict__).keys()):
if s[-1] != "(":
s += ", "
s += str(k) + "=" + repr(getattr(self,k))
s += ")"
return s
def __cmp__(self, other):
"""
Comparison by __dict__.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults
sage: DD1 = DocTestDefaults(long=True)
sage: DD2 = DocTestDefaults(long=True)
sage: DD1 == DD2
True
"""
c = cmp(type(self), type(other))
if c: return c
return cmp(self.__dict__,other.__dict__)
def skipdir(dirname):
"""
Return True if and only if the directory ``dirname`` should not be
doctested.
EXAMPLES::
sage: from sage.doctest.control import skipdir
sage: skipdir(sage.env.SAGE_SRC)
False
sage: skipdir(os.path.join(sage.env.SAGE_SRC, "sage", "doctest", "tests"))
True
"""
if os.path.exists(os.path.join(dirname, "nodoctest.py")):
return True
return False
def skipfile(filename):
"""
Return True if and only if the file ``filename`` should not be
doctested.
EXAMPLES::
sage: from sage.doctest.control import skipfile
sage: skipfile("skipme.c")
True
sage: f = tmp_filename(ext=".pyx")
sage: skipfile(f)
False
sage: open(f, "w").write("# nodoctest")
sage: skipfile(f)
True
"""
base, ext = os.path.splitext(filename)
if ext not in ('.py', '.pyx', '.pxi', '.sage', '.spyx', '.rst', '.tex'):
return True
with open(filename) as F:
line_count = 0
for line in F:
if nodoctest_regex.match(line):
return True
line_count += 1
if line_count >= 10:
break
return False
class DocTestController(SageObject):
"""
This class controls doctesting of files.
After creating it with appropriate options, call the :meth:`run` method to run the doctests.
"""
def __init__(self, options, args):
"""
Initialization.
INPUT:
- options -- either options generated from the command line by SAGE_LOCAL/bin/sage-runtests
or a DocTestDefaults object (possibly with some entries modified)
- args -- a list of filenames to doctest
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: DC
DocTest Controller
"""
# First we modify options to take environment variables into
# account and check compatibility of the user's specified
# options.
if options.timeout < 0:
if options.gdb or options.debug:
# Interactive debuggers: "infinite" timeout
options.timeout = 0
elif options.valgrind or options.massif or options.cachegrind or options.omega:
# Non-interactive debuggers: 48 hours
options.timeout = int(os.getenv('SAGE_TIMEOUT_VALGRIND', 48 * 60 * 60))
elif options.long:
options.timeout = int(os.getenv('SAGE_TIMEOUT_LONG', 30 * 60))
else:
options.timeout = int(os.getenv('SAGE_TIMEOUT', 5 * 60))
if options.nthreads == 0:
options.nthreads = int(os.getenv('SAGE_NUM_THREADS_PARALLEL',1))
if options.failed and not (args or options.new or options.sagenb):
# If the user doesn't specify any files then we rerun all failed files.
options.all = True
if options.global_iterations == 0:
options.global_iterations = int(os.environ.get('SAGE_TEST_GLOBAL_ITER', 1))
if options.file_iterations == 0:
options.file_iterations = int(os.environ.get('SAGE_TEST_ITER', 1))
if options.debug and options.nthreads > 1:
print("Debugging requires single-threaded operation, setting number of threads to 1.")
options.nthreads = 1
if options.serial:
options.nthreads = 1
if options.verbose:
options.show_skipped = True
if isinstance(options.optional, basestring):
s = options.optional.lower()
if s in ['all', 'true']:
options.optional = True
else:
options.optional = set(s.split(','))
# Check that all tags are valid
for o in options.optional:
if not optionaltag_regex.search(o):
raise ValueError('invalid optional tag %s'%repr(o))
self.options = options
self.files = args
if options.logfile:
try:
self.logfile = open(options.logfile, 'a')
except IOError:
print "Unable to open logfile at %s\nProceeding without logging."%(options.logfile)
self.logfile = None
else:
self.logfile = None
self.stats = {}
self.load_stats(options.stats_path)
def _repr_(self):
"""
String representation.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: repr(DC) # indirect doctest
'DocTest Controller'
"""
return "DocTest Controller"
def load_stats(self, filename):
"""
Load stats from the most recent run(s).
Stats are stored as a JSON file, and include information on
which files failed tests and the walltime used for execution
of the doctests.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: import json
sage: filename = tmp_filename()
sage: with open(filename, 'w') as stats_file:
... json.dump({'sage.doctest.control':{u'walltime':1.0r}}, stats_file)
sage: DC.load_stats(filename)
sage: DC.stats['sage.doctest.control']
{u'walltime': 1.0}
If the file doesn't exist, nothing happens. If there is an
error, print a message. In any case, leave the stats alone::
sage: d = tmp_dir()
sage: DC.load_stats(os.path.join(d)) # Cannot read a directory
Error loading stats from ...
sage: DC.load_stats(os.path.join(d, "no_such_file"))
sage: DC.stats['sage.doctest.control']
{u'walltime': 1.0}
"""
# Simply ignore non-existing files
if not os.path.exists(filename):
return
try:
with open(filename) as stats_file:
self.stats.update(json.load(stats_file))
except Exception:
self.log("Error loading stats from %s"%filename)
def save_stats(self, filename):
"""
Save stats from the most recent run as a JSON file.
WARNING: This function overwrites the file.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: DC.stats['sage.doctest.control'] = {u'walltime':1.0r}
sage: filename = tmp_filename()
sage: DC.save_stats(filename)
sage: import json
sage: D = json.load(open(filename))
sage: D['sage.doctest.control']
{u'walltime': 1.0}
"""
from sage.misc.temporary_file import atomic_write
with atomic_write(filename) as stats_file:
json.dump(self.stats, stats_file)
def log(self, s, end="\n"):
"""
Logs the string ``s + end`` (where ``end`` is a newline by default)
to the logfile and prints it to the standard output.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DD = DocTestDefaults(logfile=tmp_filename())
sage: DC = DocTestController(DD, [])
sage: DC.log("hello world")
hello world
sage: DC.logfile.close()
sage: print open(DD.logfile).read()
hello world
Check that no duplicate logs appear, even when forking (:trac:`15244`)::
sage: DD = DocTestDefaults(logfile=tmp_filename())
sage: DC = DocTestController(DD, [])
sage: DC.log("hello world")
hello world
sage: if os.fork() == 0:
....: DC.logfile.close()
....: os._exit(0)
sage: DC.logfile.close()
sage: print open(DD.logfile).read()
hello world
"""
s += end
if self.logfile is not None:
self.logfile.write(s)
self.logfile.flush()
sys.stdout.write(s)
sys.stdout.flush()
def test_safe_directory(self, dir=None):
"""
Test that the given directory is safe to run Python code from.
We use the check added to Python for this, which gives a
warning when the current directory is considered unsafe. We promote
this warning to an error with ``-Werror``. See
``sage/tests/cmdline.py`` for a doctest that this works, see
also :trac:`13579`.
TESTS::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DD = DocTestDefaults()
sage: DC = DocTestController(DD, [])
sage: DC.test_safe_directory()
sage: d = os.path.join(tmp_dir(), "test")
sage: os.mkdir(d)
sage: os.chmod(d, 0o777)
sage: DC.test_safe_directory(d)
Traceback (most recent call last):
...
RuntimeError: refusing to run doctests...
"""
import subprocess
with open(os.devnull, 'w') as dev_null:
if subprocess.call(['python', '-Werror', '-c', ''],
stdout=dev_null, stderr=dev_null, cwd=dir) != 0:
raise RuntimeError(
"refusing to run doctests from the current "
"directory '{}' since untrusted users could put files in "
"this directory, making it unsafe to run Sage code from"
.format(os.getcwd()))
def create_run_id(self):
"""
Creates the run id.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(), [])
sage: DC.create_run_id()
Running doctests with ID ...
"""
self.run_id = time.strftime('%Y-%m-%d-%H-%M-%S-') + "%08x" % random.getrandbits(32)
from sage.version import version
self.log("Running doctests with ID %s."%self.run_id)
def add_files(self):
r"""
Checks for the flags '--all', '--new' and '--sagenb'.
For each one present, this function adds the appropriate directories and files to the todo list.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: log_location = os.path.join(SAGE_TMP, 'control_dt_log.log')
sage: DD = DocTestDefaults(all=True, logfile=log_location)
sage: DC = DocTestController(DD, [])
sage: DC.add_files()
Doctesting entire Sage library.
sage: os.path.join(SAGE_SRC, 'sage') in DC.files
True
::
sage: DD = DocTestDefaults(new = True)
sage: DC = DocTestController(DD, [])
sage: DC.add_files()
Doctesting ...
::
sage: DD = DocTestDefaults(sagenb = True)
sage: DC = DocTestController(DD, [])
sage: DC.add_files()
Doctesting the Sage notebook.
sage: DC.files[0][-6:]
'sagenb'
"""
opj = os.path.join
from sage.env import SAGE_SRC, SAGE_ROOT
def all_files():
from glob import glob
self.files.append(opj(SAGE_SRC, 'sage'))
self.files.append(opj(SAGE_SRC, 'doc', 'common'))
self.files.extend(glob(opj(SAGE_SRC, 'doc', '[a-z][a-z]')))
self.options.sagenb = True
DOT_GIT= opj(SAGE_ROOT, '.git')
have_git = os.path.exists(DOT_GIT)
if self.options.all or (self.options.new and not have_git):
self.log("Doctesting entire Sage library.")
all_files()
elif self.options.new and have_git:
# Get all files changed in the working repo.
self.log("Doctesting files changed since last git commit")
import subprocess
change = subprocess.check_output(["git",
"--git-dir=" + DOT_GIT,
"--work-tree=" + SAGE_ROOT,
"status",
"--porcelain"])
for line in change.split("\n"):
if not line:
continue
data = line.strip().split(' ')
status, filename = data[0], data[-1]
if (set(status).issubset("MARCU")
and filename.startswith("src/sage")
and (filename.endswith(".py") or filename.endswith(".pyx"))):
self.files.append(os.path.relpath(opj(SAGE_ROOT,filename)))
if self.options.sagenb:
if not self.options.all:
self.log("Doctesting the Sage notebook.")
from pkg_resources import Requirement, working_set
sagenb_loc = working_set.find(Requirement.parse('sagenb')).location
self.files.append(opj(sagenb_loc, 'sagenb'))
def expand_files_into_sources(self):
r"""
Expands ``self.files``, which may include directories, into a
list of :class:`sage.doctest.FileDocTestSource`
This function also handles the optional command line option.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'doctest')
sage: DD = DocTestDefaults(optional='all')
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: len(DC.sources)
9
sage: DC.sources[0].options.optional
True
::
sage: DD = DocTestDefaults(optional='magma,guava')
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: sorted(list(DC.sources[0].options.optional))
['guava', 'magma']
We check that files are skipped appropriately::
sage: dirname = tmp_dir()
sage: filename = os.path.join(dirname, 'not_tested.py')
sage: with open(filename, 'w') as F:
....: F.write("#"*80 + "\n\n\n\n## nodoctest\n sage: 1+1\n 4")
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: DC.sources
[]
The directory ``sage/doctest/tests`` contains ``nodoctest.py``
but the files should still be tested when that directory is
explicitly given (as opposed to being recursed into)::
sage: DC = DocTestController(DD, [os.path.join(SAGE_SRC, 'sage', 'doctest', 'tests')])
sage: DC.expand_files_into_sources()
sage: len(DC.sources) >= 10
True
"""
def expand():
for path in self.files:
if os.path.isdir(path):
for root, dirs, files in os.walk(path):
for dir in list(dirs):
if dir[0] == "." or skipdir(os.path.join(root,dir)):
dirs.remove(dir)
for file in files:
if not skipfile(os.path.join(root,file)):
yield os.path.join(root, file)
else:
# the user input this file explicitly, so we don't skip it
yield path
self.sources = [FileDocTestSource(path, self.options) for path in expand()]
def filter_sources(self):
"""
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'doctest')
sage: DD = DocTestDefaults(failed=True)
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: for i, source in enumerate(DC.sources):
... DC.stats[source.basename] = {'walltime': 0.1*(i+1)}
sage: DC.stats['sage.doctest.control'] = {'failed':True,'walltime':1.0}
sage: DC.filter_sources()
Only doctesting files that failed last test.
sage: len(DC.sources)
1
"""
# Filter the sources to only include those with failing doctests if the --failed option is passed
if self.options.failed:
self.log("Only doctesting files that failed last test.")
def is_failure(source):
basename = source.basename
return basename not in self.stats or self.stats[basename].get('failed')
self.sources = filter(is_failure, self.sources)
def sort_sources(self):
r"""
This function sorts the sources so that slower doctests are run first.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'doctest')
sage: DD = DocTestDefaults(nthreads=2)
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: DC.sources.sort(key=lambda s:s.basename)
sage: for i, source in enumerate(DC.sources):
... DC.stats[source.basename] = {'walltime': 0.1*(i+1)}
sage: DC.sort_sources()
Sorting sources by runtime so that slower doctests are run first....
sage: print "\n".join([source.basename for source in DC.sources])
sage.doctest.util
sage.doctest.test
sage.doctest.sources
sage.doctest.reporting
sage.doctest.parsing
sage.doctest.forker
sage.doctest.control
sage.doctest.all
sage.doctest
"""
if self.options.nthreads > 1 and len(self.sources) > self.options.nthreads:
self.log("Sorting sources by runtime so that slower doctests are run first....")
default = dict(walltime=0)
def sort_key(source):
basename = source.basename
return -self.stats.get(basename, default).get('walltime'), basename
self.sources = [x[1] for x in sorted((sort_key(source), source) for source in self.sources)]
def run_doctests(self):
"""
Actually runs the doctests.
This function is called by :meth:`run`.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'rings', 'homset.py')
sage: DD = DocTestDefaults()
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: DC.run_doctests()
Doctesting 1 file.
sage -t .../sage/rings/homset.py
[... tests, ... s]
----------------------------------------------------------------------
All tests passed!
----------------------------------------------------------------------
Total time for all tests: ... seconds
cpu time: ... seconds
cumulative wall time: ... seconds
"""
nfiles = 0
nother = 0
for F in self.sources:
if isinstance(F, FileDocTestSource):
nfiles += 1
else:
nother += 1
if self.sources:
filestr = ", ".join(([count_noun(nfiles, "file")] if nfiles else []) +
([count_noun(nother, "other source")] if nother else []))
threads = " using %s threads"%(self.options.nthreads) if self.options.nthreads > 1 else ""
iterations = []
if self.options.global_iterations > 1:
iterations.append("%s global iterations"%(self.options.global_iterations))
if self.options.file_iterations > 1:
iterations.append("%s file iterations"%(self.options.file_iterations))
iterations = ", ".join(iterations)
if iterations:
iterations = " (%s)"%(iterations)
self.log("Doctesting %s%s%s."%(filestr, threads, iterations))
self.reporter = DocTestReporter(self)
self.dispatcher = DocTestDispatcher(self)
N = self.options.global_iterations
for it in range(N):
try:
self.timer = Timer().start()
self.dispatcher.dispatch()
except KeyboardInterrupt:
it = N - 1
break
finally:
self.timer.stop()
self.reporter.finalize()
self.cleanup(it == N - 1)
else:
self.log("No files to doctest")
self.reporter = DictAsObject(dict(error_status=0))
def cleanup(self, final=True):
"""
Runs cleanup activities after actually running doctests.
In particular, saves the stats to disk and closes the logfile.
INPUT:
- ``final`` -- whether to close the logfile
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: dirname = os.path.join(SAGE_SRC, 'sage', 'rings', 'infinity.py')
sage: DD = DocTestDefaults()
sage: DC = DocTestController(DD, [dirname])
sage: DC.expand_files_into_sources()
sage: DC.sources.sort(key=lambda s:s.basename)
sage: for i, source in enumerate(DC.sources):
....: DC.stats[source.basename] = {'walltime': 0.1*(i+1)}
....:
sage: DC.run()
Running doctests with ID ...
Doctesting 1 file.
sage -t .../rings/infinity.py
[... tests, ... s]
----------------------------------------------------------------------
All tests passed!
----------------------------------------------------------------------
Total time for all tests: ... seconds
cpu time: ... seconds
cumulative wall time: ... seconds
0
sage: DC.cleanup()
"""
self.stats.update(self.reporter.stats)
self.save_stats(self.options.stats_path)
# Close the logfile
if final and self.logfile is not None:
self.logfile.close()
self.logfile = None
def _assemble_cmd(self):
"""
Assembles a shell command used in running tests under gdb or valgrind.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DC = DocTestController(DocTestDefaults(timeout=123), ["hello_world.py"])
sage: print DC._assemble_cmd()
python "$SAGE_LOCAL/bin/sage-runtests" --serial --timeout=123 hello_world.py
"""
cmd = '''python "%s" --serial '''%(os.path.join("$SAGE_LOCAL","bin","sage-runtests"))
opt = dict_difference(self.options.__dict__, DocTestDefaults().__dict__)
for o in ("all", "sagenb"):
if o in opt:
raise ValueError("You cannot run gdb/valgrind on the whole sage%s library"%("" if o == "all" else "nb"))
for o in ("all", "sagenb", "long", "force_lib", "verbose", "failed", "new"):
if o in opt:
cmd += "--%s "%o
for o in ("timeout", "optional", "randorder", "stats_path"):
if o in opt:
cmd += "--%s=%s "%(o, opt[o])
return cmd + " ".join(self.files)
def run_val_gdb(self, testing=False):
"""
Spawns a subprocess to run tests under the control of gdb or valgrind.
INPUT:
- ``testing`` -- boolean; if True then the command to be run
will be printed rather than a subprocess started.
EXAMPLES:
Note that the command lines include unexpanded environment
variables. It is safer to let the shell expand them than to
expand them here and risk insufficient quoting. ::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: DD = DocTestDefaults(gdb=True)
sage: DC = DocTestController(DD, ["hello_world.py"])
sage: DC.run_val_gdb(testing=True)
exec gdb -x "$SAGE_LOCAL/bin/sage-gdb-commands" --args python "$SAGE_LOCAL/bin/sage-runtests" --serial --timeout=0 hello_world.py
::
sage: DD = DocTestDefaults(valgrind=True, optional="all", timeout=172800)
sage: DC = DocTestController(DD, ["hello_world.py"])
sage: DC.run_val_gdb(testing=True)
exec valgrind --tool=memcheck --leak-resolution=high --leak-check=full --num-callers=25 --suppressions="$SAGE_LOCAL/lib/valgrind/sage.supp" --log-file=".../valgrind/sage-memcheck.%p" python "$SAGE_LOCAL/bin/sage-runtests" --serial --timeout=172800 --optional=True hello_world.py
"""
try:
sage_cmd = self._assemble_cmd()
except ValueError:
self.log(sys.exc_info()[1])
return 2
opt = self.options
if opt.gdb:
cmd = '''exec gdb -x "$SAGE_LOCAL/bin/sage-gdb-commands" --args '''
flags = ""
if opt.logfile:
sage_cmd += " --logfile %s"%(opt.logfile)
else:
if opt.logfile is None:
default_log = os.path.join(DOT_SAGE, "valgrind")
if os.path.exists(default_log):
if not os.path.isdir(default_log):
self.log("%s must be a directory"%default_log)
return 2
else:
os.makedirs(default_log)
logfile = os.path.join(default_log, "sage-%s")
else:
logfile = opt.logfile
if opt.valgrind:
toolname = "memcheck"
flags = os.getenv("SAGE_MEMCHECK_FLAGS")
if flags is None:
flags = "--leak-resolution=high --leak-check=full --num-callers=25 "
flags += '''--suppressions="%s" '''%(os.path.join("$SAGE_LOCAL","lib","valgrind","sage.supp"))
elif opt.massif:
toolname = "massif"
flags = os.getenv("SAGE_MASSIF_FLAGS", "--depth=6 ")
elif opt.cachegrind:
toolname = "cachegrind"
flags = os.getenv("SAGE_CACHEGRIND_FLAGS", "")
elif opt.omega:
toolname = "exp-omega"
flags = os.getenv("SAGE_OMEGA_FLAGS", "")
cmd = "exec valgrind --tool=%s "%(toolname)
flags += ''' --log-file="%s" ''' % logfile
if opt.omega:
toolname = "omega"
if "%s" in flags:
flags %= toolname + ".%p" # replace %s with toolname
cmd += flags + sage_cmd
self.log(cmd)
sys.stdout.flush()
sys.stderr.flush()
if self.logfile is not None:
self.logfile.flush()
if testing:
return
# Setup Sage signal handler
_init_csage()
import signal, subprocess
p = subprocess.Popen(cmd, shell=True)
if opt.timeout > 0:
signal.alarm(opt.timeout)
try:
return p.wait()
except AlarmInterrupt:
self.log(" Timed out")
return 4
except KeyboardInterrupt:
self.log(" Interrupted")
return 128
finally:
signal.alarm(0)
if p.returncode is None:
p.terminate()
def run(self):
"""
This function is called after initialization to set up and run all doctests.
EXAMPLES::
sage: from sage.doctest.control import DocTestDefaults, DocTestController
sage: from sage.env import SAGE_SRC
sage: import os
sage: DD = DocTestDefaults()
sage: filename = os.path.join(SAGE_SRC, "sage", "sets", "non_negative_integers.py")
sage: DC = DocTestController(DD, [filename])
sage: DC.run()
Running doctests with ID ...
Doctesting 1 file.
sage -t .../sage/sets/non_negative_integers.py
[... tests, ... s]
----------------------------------------------------------------------
All tests passed!
----------------------------------------------------------------------
Total time for all tests: ... seconds
cpu time: ... seconds
cumulative wall time: ... seconds
0
"""
opt = self.options
L = (opt.gdb, opt.valgrind, opt.massif, opt.cachegrind, opt.omega)
if any(L):
if L.count(True) > 1:
self.log("You may only specify one of gdb, valgrind/memcheck, massif, cachegrind, omega")
return 2
return self.run_val_gdb()
else:
self.test_safe_directory()
self.create_run_id()
self.add_files()
self.expand_files_into_sources()
self.filter_sources()
self.sort_sources()
self.run_doctests()
return self.reporter.error_status
def run_doctests(module, options=None):
"""
Runs the doctests in a given file.
INPUTS:
- ``module`` -- a Sage module, a string, or a list of such.
- ``options`` -- a DocTestDefaults object or None.
EXAMPLES::
sage: run_doctests(sage.rings.infinity)
Running doctests with ID ...
Doctesting 1 file.
sage -t .../sage/rings/infinity.py
[... tests, ... s]
----------------------------------------------------------------------
All tests passed!
----------------------------------------------------------------------
Total time for all tests: ... seconds
cpu time: ... seconds
cumulative wall time: ... seconds
"""
import sys
sys.stdout.flush()
def stringify(x):
if isinstance(x, (list, tuple)):
F = [stringify(a) for a in x]
return sage.misc.flatten.flatten(F)
elif isinstance(x, types.ModuleType):
F = x.__file__.replace(SAGE_LIB, SAGE_SRC)
base, pyfile = os.path.split(F)
file, ext = os.path.splitext(pyfile)
if ext == ".pyc":
ext = ".py"
elif ext == ".so":
ext = ".pyx"
if file == "__init__":
return [base]
else:
return [os.path.join(base, file) + ext]
elif isinstance(x, basestring):
return [os.path.abspath(x)]
F = stringify(module)
if options is None:
options = DocTestDefaults()
DC = DocTestController(options, F)
# Determine whether we're in doctest mode
save_dtmode = sage.doctest.DOCTEST_MODE
# We need the following if we're not in DOCTEST_MODE
# Tell IPython to avoid colors: it screws up the output checking.
if not save_dtmode:
if options.debug:
raise ValueError("You should not try to run doctests with a debugger from within Sage: IPython objects to embedded shells")
IP = get_ipython()
old_color = IP.colors
IP.run_line_magic('colors', 'NoColor')
old_config_color = IP.config.TerminalInteractiveShell.colors
IP.config.TerminalInteractiveShell.colors = 'NoColor'
try:
DC.run()
finally:
sage.doctest.DOCTEST_MODE = save_dtmode
if not save_dtmode:
IP.run_line_magic('colors', old_color)
IP.config.TerminalInteractiveShell.colors = old_config_color
|
py
|
1a5af992a916edc679225095a032f363f320fcde
|
import os
import requests
import prometheus_client
import threading
import logging
import time
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
PORT=9387
APIBASEURL = os.environ['SABNZBD_BASEURL']
APIKEY = os.environ['SABNZBD_APIKEY']
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO, datefmt='%Y/%m/%d %H:%M:%S')
logging.info("Starting sabnzbd_exporter on port: %d",PORT)
logging.info("Connecting to %s",APIBASEURL)
def getAPIUrl(mode):
return '{}/api?output=json&apikey={}&mode={}'.format(APIBASEURL, APIKEY, mode)
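# Example (added): getAPIUrl('queue') ->
#   '<SABNZBD_BASEURL>/api?output=json&apikey=<SABNZBD_APIKEY>&mode=queue'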
def get_sec(time_str):
h, m, s = time_str.split(':')
return int(h) * 3600 + int(m) * 60 + int(s)
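# Example (added): get_sec('1:02:30') == 1*3600 + 2*60 + 30 == 3750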
class CustomCollector(object):
def collect(self):
try:
server_stats_url = getAPIUrl('server_stats')
start = time.time()
server_stats = requests.get(server_stats_url).json()
elapsed = time.time() - start
logging.info("Request to %s returned in %s",server_stats_url, elapsed)
dwn = GaugeMetricFamily('sabnzbd_download_bytes', 'SABnzbd Overall download metrics', labels=['period'])
dwn.add_metric(['total'], server_stats['total'])
dwn.add_metric(['day'], server_stats['day'])
dwn.add_metric(['week'], server_stats['week'])
dwn.add_metric(['month'], server_stats['month'])
yield dwn
server_dwn = GaugeMetricFamily('sabnzbd_server_download_bytes','SABnzbd per server download metrics',labels=['server','period'])
for server, metrics in server_stats['servers'].items():
for metric,val in metrics.items():
if metric != 'daily':
server_dwn.add_metric([server,metric],val)
yield server_dwn
start = time.time()
queue_stats_url = getAPIUrl('queue')
queue_stats = requests.get(queue_stats_url).json()["queue"]
elapsed = time.time() - start
logging.info("Request to %s returned in %s",queue_stats_url, elapsed)
yield GaugeMetricFamily('sabnzbd_queue_size','SABnzbd Current Queue Length',value=queue_stats['noofslots_total'])
yield GaugeMetricFamily('sabnzbd_queue_download_rate_bytes_per_second','SABnzbd download rate',value=float(queue_stats['kbpersec'])*1024)
yield GaugeMetricFamily('sabnzbd_queue_remaining_bytes','SABnzbd queue remaining size',value=float(queue_stats['mbleft'])*1024*1024)
yield GaugeMetricFamily('sabnzbd_queue_total_size_bytes','SABnzbd queue total size',value=float(queue_stats['mb'])*1024*1024)
yield GaugeMetricFamily('sabnzbd_queue_remaining_seconds','SABnzbd estimated time remaining',value=get_sec(queue_stats['timeleft']))
except Exception as inst:
logging.error('Error getting stats: %s', inst)
REGISTRY.register(CustomCollector())
start_http_server(PORT)
DE = threading.Event()
DE.wait()
|
py
|
1a5afb5869ad68d289b19224e6fcd2a62de9b95c
|
from subprocess import call
isbn_regex = '^(97(8|9)-?)?\d{9}(\d|X)$'
def fix_author(author):
parts = author.split(u', ')
if len(parts) == 2:
return parts[1] + u' ' + parts[0]
return author
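# Example (added): fix_author(u'Tolkien, J. R. R.') -> u'J. R. R. Tolkien';
# any other format is returned unchanged.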
def call_mktorrent(target, torrent_filename, announce, torrent_name=None):
args = [
'mktorrent',
'-a', announce,
'-p',
'-o', torrent_filename,
]
if torrent_name:
args.extend(('-n', torrent_name))
args.append(target)
if call(args) != 0:
raise Exception('mktorrent returned non-zero')
|
py
|
1a5afcf17fb852647842b20bb5742497c59702d2
|
"""Asynchronous client for the PVOutput API."""
|
py
|
1a5afd4878a836348fd2212a16427b0e8beac086
|
# pylint: disable=R0913
# pylint: disable=W0621
import os
from urllib.parse import quote
import pytest
from aiohttp import web
from simcore_service_storage.db import setup_db
from simcore_service_storage.dsm import setup_dsm
from simcore_service_storage.rest import setup_rest
from simcore_service_storage.s3 import setup_s3
from simcore_service_storage.settings import APP_CONFIG_KEY, SIMCORE_S3_ID
def parse_db(dsm_mockup_db):
id_name_map = {}
id_file_count = {}
for d in dsm_mockup_db.keys():
md = dsm_mockup_db[d]
if not md.user_id in id_name_map:
id_name_map[md.user_id] = md.user_name
id_file_count[md.user_id] = 1
else:
id_file_count[md.user_id] = id_file_count[md.user_id] + 1
return id_file_count, id_name_map
@pytest.fixture
def client(loop, aiohttp_unused_port, aiohttp_client, python27_path, postgres_service, minio_service, osparc_api_specs_dir):
app = web.Application()
main_cfg = {
'port': aiohttp_unused_port(),
'host': 'localhost',
'python2': python27_path,
"max_workers" : 4
}
rest_cfg = {
'oas_repo': str(osparc_api_specs_dir), #'${OSPARC_SIMCORE_REPO_ROOTDIR}/api/specs',
#oas_repo: http://localhost:8043/api/specs
}
postgres_cfg = postgres_service
s3_cfg = minio_service
# fake config
app[APP_CONFIG_KEY] = {
'main': main_cfg,
'postgres' : postgres_cfg,
's3' : s3_cfg,
'rest': rest_cfg
}
#app.middlewares.append(dsm_middleware)
setup_db(app)
setup_rest(app)
setup_dsm(app)
setup_s3(app)
cli = loop.run_until_complete( aiohttp_client(app, server_kwargs=main_cfg) )
return cli
async def test_health_check(client):
resp = await client.get("/v0/")
text = await resp.text()
assert resp.status == 200, text
payload = await resp.json()
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert data
assert not error
assert data['name'] == 'simcore_service_storage'
assert data['status'] == 'SERVICE_RUNNING'
@pytest.mark.travis
async def test_locations(client):
user_id = "0"
resp = await client.get("/v0/locations?user_id={}".format(user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert len(data) == 2
assert not error
async def test_s3_files_metadata(client, dsm_mockup_db):
id_file_count, _id_name_map = parse_db(dsm_mockup_db)
# list files for every user
for _id in id_file_count:
resp = await client.get("/v0/locations/0/files/metadata?user_id={}".format(_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert len(data) == id_file_count[_id]
# list files filtered by uuid
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
uuid_filter = os.path.join(fmd.project_id, fmd.node_id)
resp = await client.get("/v0/locations/0/files/metadata?user_id={}&uuid_filter={}".format(fmd.user_id, quote(uuid_filter, safe='')))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
for d in data:
assert os.path.join(d['project_id'], d['node_id']) == uuid_filter
async def test_s3_file_metadata(client, dsm_mockup_db):
# go through all files and get them
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.get("/v0/locations/0/files/{}/metadata?user_id={}".format(quote(fmd.file_uuid, safe=''), fmd.user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert data
async def test_download_link(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.get("/v0/locations/0/files/{}?user_id={}".format(quote(fmd.file_uuid, safe=''), fmd.user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert data
async def test_upload_link(client, dsm_mockup_db):
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.put("/v0/locations/0/files/{}?user_id={}".format(quote(fmd.file_uuid, safe=''), fmd.user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert data
@pytest.mark.travis
async def test_copy(client, dsm_mockup_db, datcore_testbucket):
# copy N files
N = 2
counter = 0
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
source_uuid = fmd.file_uuid
datcore_uuid = os.path.join(datcore_testbucket, fmd.file_name)
resp = await client.put("/v0/locations/1/files/{}?user_id={}&extra_location={}&extra_source={}".format(quote(datcore_uuid, safe=''),
fmd.user_id, SIMCORE_S3_ID, quote(source_uuid, safe='')))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert data
counter = counter + 1
if counter == N:
break
# list files for every user
user_id = "0"
resp = await client.get("/v0/locations/1/files/metadata?user_id={}".format(user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert len(data) == 2 + N
async def test_delete_file(client, dsm_mockup_db):
id_file_count, _id_name_map = parse_db(dsm_mockup_db)
for d in dsm_mockup_db.keys():
fmd = dsm_mockup_db[d]
resp = await client.delete("/v0/locations/0/files/{}?user_id={}".format(quote(fmd.file_uuid, safe=''), fmd.user_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert not data
for _id in id_file_count:
resp = await client.get("/v0/locations/0/files/metadata?user_id={}".format(_id))
payload = await resp.json()
assert resp.status == 200, str(payload)
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert not error
assert len(data) == 0
async def test_action_check(client):
QUERY = 'mguidon'
ACTION = 'echo'
FAKE = {
'path_value': 'one',
'query_value': 'two',
'body_value': {
'a': 33,
'b': 45
}
}
resp = await client.post("/v0/check/{}?data={}".format(ACTION, QUERY), json=FAKE)
payload = await resp.json()
data, error = tuple( payload.get(k) for k in ('data', 'error') )
assert resp.status == 200, str(payload)
assert data
assert not error
# TODO: validate response against specs
assert data['path_value'] == ACTION
assert data['query_value'] == QUERY
|
py
|
1a5afd6f40bcc70602721e8fdb5797edf9e5c8d7
|
from telegram import Update
from telegram.ext import CallbackContext
from app.extensions import db
from app.lib.handlers.base import BaseHandler, app_context
from app.models import Channel
class MigrateFilter(BaseHandler):
@app_context
def handler(self, update: Update, context: CallbackContext):
message = update.message
# In case we get a migrate_to_chat_id update, ignore it. We'll be getting another update
# from migrate_from_chat_id after it.
if not message.migrate_from_chat_id:
return
original_chat_id = str(message.migrate_from_chat_id)
new_chat_id = str(message.chat_id)
self.logger.debug(f"migrating chat_id from {original_chat_id} to {new_chat_id}")
channel = Channel.query.filter(
Channel.chat_id == original_chat_id
).one_or_none()
if not channel:
self.logger.error(
f"Unable to find a channel that should exists: original_chat_id: {original_chat_id}, new_chat_id: {new_chat_id}"
)
return
channel.chat_id = new_chat_id
db.session.commit()
|
py
|
1a5afdad663eda5d4b58d1b44092d37a43a31510
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2007 Christopher Lenz <[email protected]>
# Copyright (C) 2008 Matt Good <[email protected]>
# Copyright (C) 2008-2010 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://bitten.edgewall.org/wiki/License.
"""Recipe commands for tools commonly used by Python projects."""
from __future__ import division
import logging
import os
import cPickle as pickle
import re
try:
set
except NameError:
from sets import Set as set
import shlex
import sys
from bitten.build import CommandLine, FileSet
from bitten.util import loc, xmlio
log = logging.getLogger('bitten.build.pythontools')
__docformat__ = 'restructuredtext en'
def _python_path(ctxt):
"""Return the path to the Python interpreter.
If the configuration has a ``python.path`` property, the value of that
option is returned; otherwise the path to the current Python interpreter is
returned.
"""
python_path = ctxt.config.get_filepath('python.path')
if python_path:
return python_path
return sys.executable
def distutils(ctxt, file_='setup.py', command='build',
options=None, timeout=None):
"""Execute a ``distutils`` command.
:param ctxt: the build context
:type ctxt: `Context`
:param file\_: name of the file defining the distutils setup
:param command: the setup command to execute
:param options: additional options to pass to the command
:param timeout: the number of seconds before the external process should
be aborted (has same constraints as CommandLine)
"""
if options:
if isinstance(options, basestring):
options = shlex.split(options)
else:
options = []
if timeout:
timeout = int(timeout)
cmdline = CommandLine(_python_path(ctxt),
[ctxt.resolve(file_), command] + options,
cwd=ctxt.basedir)
log_elem = xmlio.Fragment()
error_logged = False
for out, err in cmdline.execute(timeout):
if out is not None:
log.info(out)
log_elem.append(xmlio.Element('message', level='info')[out])
if err is not None:
level = 'error'
if err.startswith('warning: '):
err = err[9:]
level = 'warning'
log.warning(err)
elif err.startswith('error: '):
ctxt.error(err[7:])
error_logged = True
else:
log.error(err)
log_elem.append(xmlio.Element('message', level=level)[err])
ctxt.log(log_elem)
if not error_logged and cmdline.returncode != 0:
ctxt.error('distutils failed (%s)' % cmdline.returncode)
def exec_(ctxt, file_=None, module=None, function=None,
output=None, args=None, timeout=None):
"""Execute a Python script.
Either the `file_` or the `module` parameter must be provided. If
specified using the `file_` parameter, the file must be inside the project
directory. If specified as a module, the module must either be resolvable
to a file, or the `function` parameter must be provided
:param ctxt: the build context
:type ctxt: `Context`
:param file\_: name of the script file to execute
:param module: name of the Python module to execute
:param function: name of the Python function to run
:param output: name of the file to which output should be written
:param args: extra arguments to pass to the script
:param timeout: the number of seconds before the external process should
be aborted (has same constraints as CommandLine)
"""
assert file_ or module, 'Either "file" or "module" attribute required'
if function:
assert module and not file_, '"module" attribute required for use of ' \
'"function" attribute'
if module:
# Script specified as module name, need to resolve that to a file,
# or use the function name if provided
if function:
args = '-c "import sys; from %s import %s; %s(sys.argv)" %s' % (
module, function, function, args)
else:
try:
mod = __import__(module, globals(), locals(), [])
components = module.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
file_ = mod.__file__.replace('\\', '/')
except ImportError, e:
ctxt.error('Cannot execute Python module %s: %s' % (module, e))
return
from bitten.build import shtools
returncode = shtools.execute(ctxt, executable=_python_path(ctxt),
file_=file_, output=output, args=args,
timeout=timeout)
if returncode != 0:
ctxt.error('Executing %s failed (error code %s)' % \
(file_ or function or module, returncode))
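# Recipe sketch (added; hypothetical module/function names): running a module-level entry
# point through the interpreter resolved by _python_path():
#   exec_(ctxt, module='mypackage.tasks', function='main', args='--quiet')
# builds: python -c "import sys; from mypackage.tasks import main; main(sys.argv)" --quiet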
def pylint(ctxt, file_=None):
"""Extract data from a ``pylint`` run written to a file.
:param ctxt: the build context
:type ctxt: `Context`
:param file\_: name of the file containing the Pylint output
"""
assert file_, 'Missing required attribute "file"'
msg_re = re.compile(r"^(?P<file>.+):(?P<line>\d+): "
r"\[(?P<type>[WECR]\d+)\((?P<symbol>.+)\), "
r"(?P<tag>.+)?\] (?P<msg>.+)$")
msg_categories = dict(W='warning', E='error', C='convention', R='refactor')
problems = xmlio.Fragment()
try:
fd = open(ctxt.resolve(file_), 'r')
try:
for line in fd:
match = msg_re.search(line)
if match:
msg_type = match.group('type')
category = msg_categories.get(msg_type[0])
if len(msg_type) == 1:
msg_type = None
filename = match.group('file')
if os.path.isabs(filename) \
and filename.startswith(ctxt.basedir):
filename = filename[len(ctxt.basedir) + 1:]
filename = filename.replace('\\', '/')
lineno = int(match.group('line'))
tag = match.group('tag')
problems.append(xmlio.Element('problem', category=category,
type=msg_type, tag=tag,
line=lineno, file=filename)[
xmlio.Element('msg')[match.group('msg') or '']
])
ctxt.report('lint', problems)
finally:
fd.close()
except IOError, e:
log.warning('Error opening pylint results file (%s)', e)
def coverage(ctxt, summary=None, coverdir=None, include=None, exclude=None):
"""Extract data from a ``coverage.py`` run.
:param ctxt: the build context
:type ctxt: `Context`
:param summary: path to the file containing the coverage summary
:param coverdir: name of the directory containing the per-module coverage
details
:param include: patterns of files or directories to include in the report
:param exclude: patterns of files or directories to exclude from the report
"""
assert summary, 'Missing required attribute "summary"'
summary_line_re = re.compile(r'^(?P<module>.*?)\s+(?P<stmts>\d+)\s+'
r'(?P<exec>\d+)\s+(?P<cov>\d+)%\s+'
r'(?:(?P<missing>(?:\d+(?:-\d+)?(?:, )?)*)\s+)?'
r'(?P<file>.+)$')
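# Example summary line matched above (added; illustrative):
#   mypkg.module    58    45    78%   12-15, 20   mypkg/module.py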
fileset = FileSet(ctxt.basedir, include, exclude)
missing_files = []
for filename in fileset:
if os.path.splitext(filename)[1] != '.py':
continue
missing_files.append(filename)
covered_modules = set()
def handle_file(element, modname):
if not coverdir:
return
fp = ctxt.resolve(os.path.join(coverdir, modname.replace(".", "_") +
".py,cover"))
if not os.path.exists(fp):
log.info("No line by line coverage available for %s", modname)
return
try:
with open(fp) as f:
lines = []
for line in f:
if line.startswith(">"):
lines.append("1")
elif line.startswith("!"):
lines.append("0")
else:
lines.append("-")
element.append(xmlio.Element('line_hits')[' '.join(lines)])
except Exception, e:
log.info("Error while processing line by line coverage: %s", e)
try:
summary_file = open(ctxt.resolve(summary), 'r')
try:
coverage = xmlio.Fragment()
for summary_line in summary_file:
match = summary_line_re.search(summary_line)
if match:
modname = match.group(1)
filename = match.group(6)
if not os.path.isabs(filename):
filename = os.path.normpath(os.path.join(ctxt.basedir,
filename))
else:
filename = os.path.realpath(filename)
if not filename.startswith(ctxt.basedir):
continue
filename = filename[len(ctxt.basedir) + 1:]
if not filename in fileset:
continue
percentage = int(match.group(4).rstrip('%'))
num_lines = int(match.group(2))
missing_files.remove(filename)
covered_modules.add(modname)
module = xmlio.Element('coverage', name=modname,
file=filename.replace(os.sep, '/'),
percentage=percentage,
lines=num_lines)
coverage.append(module)
handle_file(module, modname)
for filename in missing_files:
modname = os.path.splitext(filename.replace(os.sep, '.'))[0]
if modname in covered_modules:
continue
covered_modules.add(modname)
module = xmlio.Element('coverage', name=modname,
file=filename.replace(os.sep, '/'),
percentage=0)
coverage.append(module)
ctxt.report('coverage', coverage)
finally:
summary_file.close()
except IOError, e:
log.warning('Error opening coverage summary file (%s)', e)
def trace(ctxt, summary=None, coverdir=None, include=None, exclude=None):
"""Extract data from a ``trace.py`` run.
:param ctxt: the build context
:type ctxt: `Context`
:param summary: path to the file containing the coverage summary
:param coverdir: name of the directory containing the per-module coverage
details
:param include: patterns of files or directories to include in the report
:param exclude: patterns of files or directories to exclude from the report
"""
assert summary, 'Missing required attribute "summary"'
assert coverdir, 'Missing required attribute "coverdir"'
summary_line_re = re.compile(r'^\s*(?P<lines>\d+)\s+(?P<cov>\d+)%\s+'
r'(?P<module>.*?)\s+\((?P<filename>.*?)\)')
coverage_line_re = re.compile(r'\s*(?:(?P<hits>\d+): )?(?P<line>.*)')
fileset = FileSet(ctxt.basedir, include, exclude)
missing_files = []
for filename in fileset:
if os.path.splitext(filename)[1] != '.py':
continue
missing_files.append(filename)
covered_modules = set()
def handle_file(elem, sourcefile, coverfile=None):
code_lines = set()
for lineno, linetype, line in loc.count(sourcefile):
if linetype == loc.CODE:
code_lines.add(lineno)
num_covered = 0
lines = []
if coverfile:
prev_hits = '0'
for idx, coverline in enumerate(coverfile):
match = coverage_line_re.search(coverline)
if match:
hits = match.group(1)
if hits: # Line covered
if hits != '0':
num_covered += 1
lines.append(hits)
prev_hits = hits
elif coverline.startswith('>'): # Line not covered
lines.append('0')
prev_hits = '0'
elif idx not in code_lines: # Not a code line
lines.append('-')
prev_hits = '0'
else: # A code line not flagged by trace.py
if prev_hits != '0':
num_covered += 1
lines.append(prev_hits)
elem.append(xmlio.Element('line_hits')[' '.join(lines)])
num_lines = not lines and len(code_lines) or \
len([l for l in lines if l != '-'])
if num_lines:
percentage = int(round(num_covered * 100 / num_lines))
else:
percentage = 0
elem.attr['percentage'] = percentage
elem.attr['lines'] = num_lines
try:
summary_file = open(ctxt.resolve(summary), 'r')
try:
coverage = xmlio.Fragment()
for summary_line in summary_file:
match = summary_line_re.search(summary_line)
if match:
modname = match.group(3)
filename = match.group(4)
if not os.path.isabs(filename):
filename = os.path.normpath(os.path.join(ctxt.basedir,
filename))
else:
filename = os.path.realpath(filename)
if not filename.startswith(ctxt.basedir):
continue
filename = filename[len(ctxt.basedir) + 1:]
if not filename in fileset:
continue
missing_files.remove(filename)
covered_modules.add(modname)
module = xmlio.Element('coverage', name=modname,
file=filename.replace(os.sep, '/'))
sourcefile = file(ctxt.resolve(filename))
try:
coverpath = ctxt.resolve(coverdir, modname + '.cover')
if os.path.isfile(coverpath):
coverfile = file(coverpath, 'r')
else:
log.warning('No coverage file for module %s at %s',
modname, coverpath)
coverfile = None
try:
handle_file(module, sourcefile, coverfile)
finally:
if coverfile:
coverfile.close()
finally:
sourcefile.close()
coverage.append(module)
for filename in missing_files:
modname = os.path.splitext(filename.replace(os.sep, '.'))[0]
if modname in covered_modules:
continue
covered_modules.add(modname)
module = xmlio.Element('coverage', name=modname,
file=filename.replace(os.sep, '/'),
percentage=0)
filepath = ctxt.resolve(filename)
fileobj = file(filepath, 'r')
try:
handle_file(module, fileobj)
finally:
fileobj.close()
coverage.append(module)
ctxt.report('coverage', coverage)
finally:
summary_file.close()
except IOError, e:
log.warning('Error opening coverage summary file (%s)', e)
def figleaf(ctxt, summary=None, include=None, exclude=None):
"""Extract data from a ``Figleaf`` run.
:param ctxt: the build context
:type ctxt: `Context`
:param summary: path to the file containing the coverage summary
:param include: patterns of files or directories to include in the report
:param exclude: patterns of files or directories to exclude from the report
"""
from figleaf import get_lines
coverage = xmlio.Fragment()
try:
fileobj = open(ctxt.resolve(summary))
except IOError, e:
log.warning('Error opening coverage summary file (%s)', e)
return
coverage_data = pickle.load(fileobj)
fileset = FileSet(ctxt.basedir, include, exclude)
for filename in fileset:
base, ext = os.path.splitext(filename)
if ext != '.py':
continue
modname = base.replace(os.path.sep, '.')
realfilename = ctxt.resolve(filename)
interesting_lines = get_lines(open(realfilename))
if not interesting_lines:
continue
covered_lines = coverage_data.get(realfilename, set())
percentage = int(round(len(covered_lines) * 100 / len(interesting_lines)))
line_hits = []
for lineno in xrange(1, max(interesting_lines)+1):
if lineno not in interesting_lines:
line_hits.append('-')
elif lineno in covered_lines:
line_hits.append('1')
else:
line_hits.append('0')
module = xmlio.Element('coverage', name=modname,
file=filename.replace(os.sep, '/'),
percentage=percentage,
lines=len(interesting_lines),
line_hits=' '.join(line_hits))
coverage.append(module)
ctxt.report('coverage', coverage)
def _normalize_filenames(ctxt, filenames, fileset):
for filename in filenames:
if not os.path.isabs(filename):
filename = os.path.normpath(os.path.join(ctxt.basedir,
filename))
else:
filename = os.path.realpath(filename)
if not filename.startswith(ctxt.basedir):
continue
filename = filename[len(ctxt.basedir) + 1:]
if filename not in fileset:
continue
yield filename.replace(os.sep, '/')
def unittest(ctxt, file_=None):
"""Extract data from a unittest results file in XML format.
:param ctxt: the build context
:type ctxt: `Context`
:param file\_: name of the file containing the test results
"""
assert file_, 'Missing required attribute "file"'
try:
fileobj = file(ctxt.resolve(file_), 'r')
try:
total, failed = 0, 0
results = xmlio.Fragment()
for child in xmlio.parse(fileobj).children():
test = xmlio.Element('test')
for name, value in child.attr.items():
if name == 'file':
value = os.path.realpath(value)
if value.startswith(ctxt.basedir):
value = value[len(ctxt.basedir) + 1:]
value = value.replace(os.sep, '/')
else:
continue
test.attr[name] = value
if name == 'status' and value in ('error', 'failure'):
failed += 1
for grandchild in child.children():
test.append(xmlio.Element(grandchild.name)[
grandchild.gettext()
])
results.append(test)
total += 1
if failed:
ctxt.error('%d of %d test%s failed' % (failed, total,
total != 1 and 's' or ''))
ctxt.report('test', results)
finally:
fileobj.close()
except IOError, e:
log.warning('Error opening unittest results file (%s)', e)
except xmlio.ParseError, e:
log.warning('Error parsing unittest results file (%s)', e)
|
py
|
1a5afe70c96d387cfb6477900b6119e954efe6ac
|
# :coding: utf-8
# :copyright: Copyright (c) 2015 ftrack
def test_add_remove_and_query_scopes_for_tasks(session, new_task, new_scope):
'''Add, remove and query scopes for task.'''
query_string = 'Task where scopes.name is {0}'.format(new_scope['name'])
tasks = session.query(query_string)
assert len(tasks) == 0
new_task['scopes'].append(new_scope)
session.commit()
tasks = session.query(query_string)
assert len(tasks) == 1 and tasks[0] == new_task
new_task['scopes'].remove(new_scope)
session.commit()
tasks = session.query(query_string)
assert len(tasks) == 0
|
py
|
1a5afec59d3b88ffda3482521385ab1e70fa5d76
|
#Creating Dictionary
d = {22:"ss",23:"ftp",53:"dns"}
print(d)
#Length
print(len(d))
#Deleting
del d[22]
print(d)
|
py
|
1a5afee450a27f310a307b4b1fb15d8f08c8ed31
|
from django.conf import settings
from zerver.lib.actions import set_default_streams, bulk_add_subscriptions, \
internal_prep_stream_message, internal_send_private_message, \
create_stream_if_needed, create_streams_if_needed, do_send_messages, \
do_add_reaction_legacy
from zerver.models import Realm, UserProfile, Message, Reaction, get_system_bot
from typing import Any, Dict, List, Mapping, Text
def send_initial_pms(user):
# type: (UserProfile) -> None
organization_setup_text = ""
if user.is_realm_admin:
help_url = user.realm.uri + "/help/getting-your-organization-started-with-zulip"
organization_setup_text = ("* [Read the guide](%s) for getting your organization "
"started with Zulip\n" % (help_url,))
content = (
"Hello, and welcome to Zulip!\n\nThis is a private message from me, Welcome Bot. "
"Here are some tips to get you started:\n"
"* Download our [Desktop and mobile apps](/apps)\n"
"* Customize your account and notifications on your [Settings page](#settings)\n"
"* Type `?` to check out Zulip's keyboard shortcuts\n"
"%s"
"\n"
"The most important shortcut is `r` to reply.\n\n"
"Practice sending a few messages by replying to this conversation. If you're not into "
"keyboards, that's okay too; clicking anywhere on this message will also do the trick!") \
% (organization_setup_text,)
internal_send_private_message(user.realm, get_system_bot(settings.WELCOME_BOT),
user, content)
def setup_initial_streams(realm):
# type: (Realm) -> None
stream_dicts = [
{'name': "general"},
{'name': "new members",
'description': "For welcoming and onboarding new members. If you haven't yet, "
"introduce yourself in a new thread using your name as the topic!"},
{'name': "zulip",
'description': "For discussing Zulip, Zulip tips and tricks, and asking "
"questions about how Zulip works"}] # type: List[Mapping[str, Any]]
create_streams_if_needed(realm, stream_dicts)
set_default_streams(realm, {stream['name']: {} for stream in stream_dicts})
# For the first user in a realm
def setup_initial_private_stream(user):
# type: (UserProfile) -> None
stream, _ = create_stream_if_needed(user.realm, "core team", invite_only=True,
stream_description="A private stream for core team members.")
bulk_add_subscriptions([stream], [user])
def send_initial_realm_messages(realm):
# type: (Realm) -> None
welcome_bot = get_system_bot(settings.WELCOME_BOT)
# Make sure each stream created in the realm creation process has at least one message below
# Order corresponds to the ordering of the streams on the left sidebar, to make the initial Home
# view slightly less overwhelming
welcome_messages = [
{'stream': Realm.DEFAULT_NOTIFICATION_STREAM_NAME,
'topic': "welcome",
'content': "This is a message on stream `%s` with the topic `welcome`. We'll use this stream "
"for system-generated notifications." % (Realm.DEFAULT_NOTIFICATION_STREAM_NAME,)},
{'stream': "core team",
'topic': "private streams",
'content': "This is a private stream. Only admins and people you invite "
"to the stream will be able to see that this stream exists."},
{'stream': "general",
'topic': "welcome",
'content': "Welcome to #**general**."},
{'stream': "new members",
'topic': "onboarding",
'content': "A #**new members** stream is great for onboarding new members.\n\nIf you're "
"reading this and aren't the first person here, introduce yourself in a new thread "
"using your name as the topic! Type `c` or click on `New Topic` at the bottom of the "
"screen to start a new topic."},
{'stream': "zulip",
'topic': "topic demonstration",
'content': "Here is a message in one topic. Replies to this message will go to this topic."},
{'stream': "zulip",
'topic': "topic demonstration",
'content': "A second message in this topic. With [turtles](/static/images/cute/turtle.png)!"},
{'stream': "zulip",
'topic': "second topic",
'content': "This is a message in a second topic.\n\nTopics are similar to email subjects, "
"in that each conversation should get its own topic. Keep them short, though; one "
"or two words will do it!"},
] # type: List[Dict[str, Text]]
messages = [internal_prep_stream_message(
realm, welcome_bot,
message['stream'], message['topic'], message['content']) for message in welcome_messages]
message_ids = do_send_messages(messages)
    # Find the just-sent message with turtle.png in it and react to it. This is
    # a bit hacky, but it works and is a one-off thing.
turtle_message = Message.objects.get(
id__in=message_ids,
subject='topic demonstration',
content__icontains='cute/turtle.png')
do_add_reaction_legacy(welcome_bot, turtle_message, 'turtle')
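# A minimal sketch of how the helpers above could be wired together when a brand-new
# realm and its first user are onboarded; the function name and the exact ordering
# are illustrative only, not the actual Zulip call sites.
def example_onboard_new_realm(realm, first_user):
    # type: (Realm, UserProfile) -> None
    setup_initial_streams(realm)
    setup_initial_private_stream(first_user)
    send_initial_realm_messages(realm)
    send_initial_pms(first_user)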
|
py
|
1a5b0059cd1773afc588ec4ddcdbfa9d398d0f5e
|
# -*- coding: utf-8 -*-
import scrapy
"""
需求: 大分类:名称,URL; 小分类名称,URL; 图书的标题,图片,出版商,价格信息
步骤:
1. 创建爬虫项目
2. 创建爬虫
3. 完善爬虫
3.1 修改起始URL
3.2 提取大分类,小分类标题和URL, 根据小分类的URL构建列表页请求
3.3 解析列表页, 提取图书标题和封面图片的URL, 构建详情页的请求
3.4 解析详情页, 提取出版社, 价格(构建价格请求)
3.5 解析价格
3.6 实现列表页分页
"""
from copy import deepcopy
import re
class BookSpider(scrapy.Spider):
name = 'book'
allowed_domains = ['suning.com']
    # 3.1 Set the start URL
start_urls = ['https://book.suning.com/']
def parse(self, response):
        # Extract the top-level and sub-category information
        # Get the list of divs containing the top-level and sub-categories
divs = response.xpath('//div[@class="menu-item"]')
        # Get the list of sub-menu divs
sub_divs = response.xpath('//div[contains(@class, "menu-sub")]')
        # Iterate over divs to collect the top-level and sub-category info
for div in divs:
item = {}
item['b_category_name'] = div.xpath('./dl/dt/h3/a/text()').extract_first()
item['b_category_url'] = div.xpath('./dl/dt/h3/a/@href').extract_first()
            # Get the list of <a> tags containing the sub-category info
a_s = div.xpath('./dl/dd/a')
            # If a_s is an empty list, extract the sub-category info from the sub-menu instead
if len(a_s) == 0:
sub_div = sub_divs[divs.index(div)]
a_s = sub_div.xpath('./div[1]/ul/li/a')
            # Iterate over a_s and extract the sub-category info
for a in a_s:
item['s_category_name'] = a.xpath('./text()').extract_first()
item['s_category_url'] = a.xpath('./@href').extract_first()
# print(item)
                # Build the list-page request from the sub-category URL
                # When an item (or dict) created outside the loop is passed to the next
                # callback, it must be deep-copied, otherwise the data gets mixed up
yield scrapy.Request(item['s_category_url'], callback=self.parse_book_list, meta={'item': deepcopy(item)})
def parse_book_list(self, response):
        # 3.3 Parse the list page, extract the book title and cover image URL, and build the detail-page request
item = response.meta['item']
        # Get the list of <li> tags containing the book info
lis = response.xpath('//*[@id="filter-results"]/ul/li')
        # Iterate over lis to get the book name and cover image
for li in lis:
item['book_name'] = li.xpath('.//p[@class="sell-point"]/a/text()').extract_first()
item['book_img'] = 'https:' + li.xpath('.//img/@src2').extract_first()
# print(item)
            # Build the detail-page request
            # Detail-page URL
detail_url = 'https:' + li.xpath('.//p[@class="sell-point"]/a/@href').extract_first()
            # Build the detail-page request and hand it to the engine
yield scrapy.Request(detail_url, callback=self.parse_book_detail, meta={'item': deepcopy(item)})
        # Implement pagination
        # 1. Get the next-page URL
        # Observed pattern:
        # Page 1: https://list.suning.com/1-264003-0.html
        # Page 2: https://list.suning.com/1-264003-1.html
        # Page 3: https://list.suning.com/1-264003-2.html
# print(response.url)
        # Normalize https://list.suning.com/1-262518-0-0-0-0.html to https://list.suning.com/1-262518-0.html
current_url = re.sub('(-0)+', '-0', response.url)
# print(current_url)
# param.currentPage = "0";
# param.pageNumbers = "61";
current_page = int(re.findall('param.currentPage\s*=\s*"(\d+)"', response.text)[0])
page_numbers = int(re.findall('param.pageNumbers\s*=\s*"(\d+)"', response.text)[0])
# print(current_page)
# print(page_numbers)
        # Compute the next page number
next_page = current_page + 1
        # If there is a next page, generate its URL
if next_page < page_numbers:
            # Build the next-page URL
            # Generate the replacement suffix
subfix = '-{}.html'.format(next_page)
            # e.g. 1 -> -1.html
next_url = re.sub('-\d+.html', subfix, current_url)
print(next_url)
            # Build the next-page request
yield scrapy.Request(next_url, callback=self.parse_book_list, meta={'item': deepcopy(item)})
def parse_book_detail(self, response):
        # Parse the detail page
        # 3.4 Parse the detail page, extract the publisher and the price (build the price request)
item = response.meta['item']
item['book_publisher'] = response.xpath('//*[@id="productName"]/a/text()').extract_first()
        # - 1. Prepare the price URL template
price_url = 'https://pas.suning.com/nspcsale_0_000000000{}_000000000{}_{}_20_021_0210101.html'
        # - 2. Extract the needed IDs from the detail-page URL
datas = re.findall('https://product.suning.com/(\d+)/(\d+).html', response.url)[0]
        # - 3. Build the full price URL
price_url = price_url.format(datas[1], datas[1], datas[0])
# print(item)
# print(price_url)
        # Build the price request
yield scrapy.Request(price_url, callback=self.parse_price, meta={'item': item})
def parse_price(self, response):
        # Parse the price
item = response.meta['item']
        # Approach: use the promotion price if there is one, otherwise fall back to the regular (net) price
price = re.findall('"promotionPrice":\s*"(\d+.\d+)"', response.text)
if len(price) == 0:
price = re.findall('"netPrice":\s*"(\d+.\d+)"', response.text)
        # Store the price
item['price'] = price[0]
# print(item)
        # Hand the item to the engine
yield item
|
py
|
1a5b011fcd2d71bbb330c19c471982f22ac35e65
|
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <[email protected]>
# Dimitri Desvillechabrol <[email protected]>,
# <[email protected]>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
"""Sequana GUI. Can also be used for any snakemake pipeline"""
import sys
import os
import shutil
import re
import time
import psutil
import subprocess as sp
import argparse
import signal
import pkg_resources
from PyQt5 import QtCore, QtGui
from PyQt5 import QtWidgets as QW
from PyQt5.QtCore import Qt, QTemporaryDir
from sequana_pipetools import snaketools
from sequanix.utils import YamlDocParser, on_cluster, rest2html
from .ui import Ui_MainWindow
from .widgets import (
Browser,
QIPythonWidget,
About,
FileBrowser,
SVGDialog,
WarningMessage,
CriticalMessage,
PreferencesDialog,
HelpDialog,
SnakemakeDialog,
Tools,
QPlainTextEditLogger,
Ruleform,
)
import easydev
import colorlog
logger = colorlog.getLogger(__name__)
def sigint_handler(*args): # pragma: no cover
"""Handler for the SIGINT signal."""
sys.stderr.write("\r")
if (
QW.QMessageBox.question(
None, "", "Are you sure you want to quit?", QW.QMessageBox.Yes | QW.QMessageBox.No, QW.QMessageBox.No
)
== QW.QMessageBox.Yes
):
QW.QApplication.quit()
class BaseFactory(Tools):
"""Tab on top are all based on this abstract class
It should provide access to a snakefile and its config file as well
as working directory.
Currently, the :class:`SequanaFactory` and :class:`GenericFactory` are
implemented.
"""
def __init__(self, mode, run_button):
super(BaseFactory, self).__init__()
self.mode = mode
self._run_button = run_button
# And finally the working directory
self._directory_browser = FileBrowser(directory=True)
self._directory_browser.clicked_connect(self._switch_off_run)
def _switch_off_run(self): # pragma: no cover
self.debug("Switching off run button")
self._run_button.setEnabled(False)
def copy(self, source, target, force): # pragma: no cover
if os.path.exists(target) and force is False:
save_msg = WarningMessage("The file <i>{0}</i> already exists in the working directory".format(source))
save_msg.setInformativeText("Do you want to overwrite it?")
save_msg.setStandardButtons(QW.QMessageBox.Yes | QW.QMessageBox.Discard | QW.QMessageBox.Cancel)
save_msg.setDefaultButton(QW.QMessageBox.Yes)
# Yes == 16384
# Save == 2048
retval = save_msg.exec_()
if retval in [16384, 2048]:
self.warning("Overwritting %s" % target)
super(BaseFactory, self).copy(source, target)
else:
super(BaseFactory, self).copy(source, target)
def _copy_snakefile(self, force=False): # pragma: no cover
if self.snakefile is None:
self.info("No pipeline selected yet")
return # nothing to be done
if self.directory is None:
self.info("No working directory selected yet (copy snakefile)")
return
# source and target filenames
target = self.directory + os.sep + os.path.basename(self.snakefile)
if os.path.exists(target) and easydev.md5(target) == easydev.md5(self.snakefile):
self.info("Target and source (pipeline) are identical. Skipping copy.")
# if target and source are identical, nothing to do
return
        # if the filenames are identical but the contents differ, do we want to overwrite it?
if os.path.basename(self.snakefile) == target:
self.warning("%s exists already in %s" % (self.snakefile, self.directory))
return
self.info("Copying snakefile in %s " % self.directory)
self.copy(self.snakefile, target, force=force)
def _copy_configfile(self): # pragma: no cover
if self.configfile is None:
self.info("No config selected yet")
return # nothing to be done
if self._directory_browser.path_is_setup() is False:
self.info("No working directory selected yet (copy config)")
return
        # FIXME
        # This does not check the formatting, so when saved the file may differ
        # from the original even though the parameters are the same...
target = self.directory + os.sep + os.path.basename(self.configfile)
if os.path.exists(target) and easydev.md5(target) == easydev.md5(self.configfile):
self.info("Target and source (pipeline) are identical. Skipping copy.")
return
self.info("Copying config in %s " % self.directory)
self.copy(self.configfile, self.directory)
def _get_directory(self):
filename = self._directory_browser.get_filenames()
if len(filename):
return filename
else:
return None
directory = property(_get_directory)
def __repr__(self):
return "%s Factory" % self.mode
class SequanaFactory(BaseFactory):
def __init__(self, run_button, combobox):
super(SequanaFactory, self).__init__("sequana", run_button)
self._imported_config = None
self._choice_button = combobox
# Some widgets to be used: a file browser for paired files
fastq_filter = "Fastq file (*.fastq *.fastq.gz *.fq *.fq.gz)"
self._sequana_paired_tab = FileBrowser(paired=True, file_filter=fastq_filter)
self._sequana_readtag_label2 = QW.QLabel("Read tag (e.g. _[12].fastq)")
self._sequana_readtag_lineedit2 = QW.QLineEdit("_R[12]_")
# Set the file browser input_directory tab
self._sequana_directory_tab = FileBrowser(directory=True)
self._sequana_readtag_label = QW.QLabel("Read tag (e.g. _[12].fastq)")
self._sequana_readtag_lineedit = QW.QLineEdit("_R[12]_")
self._sequana_pattern_label = QW.QLabel("<div><i>Optional</i> pattern (e.g., Samples_1?/*fastq.gz)</div>")
self._sequana_pattern_lineedit = QW.QLineEdit()
# triggers/connectors
self._sequana_directory_tab.clicked_connect(self._switch_off_run)
self._choice_button.activated.connect(self._switch_off_run)
self._sequana_paired_tab.clicked_connect(self._switch_off_run)
def _get_pipeline(self):
index = self._choice_button.currentIndex()
if index == 0:
return None
else:
return self._choice_button.currentText()
pipeline = property(_get_pipeline)
def _get_snakefile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.snakefile
snakefile = property(_get_snakefile)
def _get_configfile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.config
configfile = property(_get_configfile)
def _get_clusterconfigfile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.cluster_config
clusterconfigfile = property(_get_clusterconfigfile)
def _get_multiqcconfigfile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.multiqc_config
multiqcconfigfile = property(_get_multiqcconfigfile)
def _get_schemafile(self):
if self.pipeline:
module = snaketools.Module(self.pipeline)
return module.schema_config
schemafile = property(_get_schemafile)
def _get_config(self):
if self._imported_config:
cfg = snaketools.SequanaConfig(self._imported_config)
return cfg
if self.configfile:
try:
cfg = snaketools.SequanaConfig(self.configfile)
return cfg
except AssertionError:
self.warning("Warning: could not parse the config file")
return
config = property(_get_config)
def __repr__(self): # pragma: no cover
in1 = self._sequana_directory_tab.get_filenames()
in2 = self._sequana_paired_tab.get_filenames()
txt = super(SequanaFactory, self).__repr__()
txt += "\npipeline:%s\ninput:\n - %s\n - %s\n - directory:%s\n"
if self.clusterconfigfile:
txt += " - cluster config: %s\n" % self.clusterconfigfile
if self.schemafile:
txt += " - schema config: %s" % self.schemafile
if self.multiqcconfigfile:
txt += " - schema config: %s" % self.multiqcconfigfile
return txt % (self.pipeline, in1, in2, self.directory)
class GenericFactory(BaseFactory):
def __init__(self, run_button):
super(GenericFactory, self).__init__("generic", run_button)
# Define the Snakefile browser and config widgets
self._snakefile_browser = FileBrowser(directory=False)
self._config_browser = FileBrowser(directory=False, file_filter="YAML file (*.json *.yaml *.yml)")
# when a snakefile or config is chosen, switch off run button
self._config_browser.clicked_connect(self._switch_off_run)
self._snakefile_browser.clicked_connect(self._switch_off_run)
self._schema = None
self._multiqcconfigfile = None
def _return_none(self, this):
if this is None or len(this) == 0:
return None
else:
return this
def _get_snakefile(self):
return self._return_none(self._snakefile_browser.get_filenames())
snakefile = property(_get_snakefile)
def _get_configfile(self):
return self._return_none(self._config_browser.get_filenames())
configfile = property(_get_configfile)
def _get_schemafile(self):
return self._return_none(self._schema)
schemafile = property(_get_schemafile)
def _get_multiqcconfigfile(self):
return self._return_none(self._multiqcconfigfile)
multiqcconfigfile = property(_get_multiqcconfigfile)
def _get_config(self): # pragma: no cover
filename = self._return_none(self._config_browser.get_filenames())
if filename:
try:
configfile = snaketools.SequanaConfig(filename)
except AssertionError:
self.critical("Could not parse the config file %s" % filename)
return
except Exception:
self.critical("Could not parse the config file %s. 2" % filename)
return
return configfile
config = property(_get_config)
def is_runnable(self):
flag1 = self._directory_browser.path_is_setup()
flag2 = self._snakefile_browser.path_is_setup()
flag3 = self._config_browser.path_is_setup()
        # flag1 and flag2 are compulsory
        # flag3 (configfile) is trickier to handle since it may or may not be
        # required, so we only rely on flag1 and flag2
return flag1 and flag2
def __repr__(self):
txt = super(GenericFactory, self).__repr__()
txt += "\nsnakefile:%s\nconfigfile:%s\ndirectory:%s\nschema:%s\nmultiqcconfigfile:%s"
return txt % (self.snakefile, self.configfile, self.directory, self.schemafile, self.multiqcconfigfile)
class SequanixGUI(QW.QMainWindow, Tools):
"""
If quiet, progress bar cannot work.
- do not copy again requirements if already there
- extension of the different widgets ?
Developer Guide
------------------
- The GUI is designed with qt designer as much as possible.
    - All GUI objects are in the **ui** attributes. Additional dialogs such as the
    snakemake and preferences dialogs have their own modules and are stored in
    attributes ending in _dialog
"""
_not_a_rule = {"requirements", "gatk_bin", "input_directory", "input_pattern", "ignore"}
_browser_keywords = {"reference"}
_to_exclude = ["atac-seq", "compressor"]
def __init__(self, parent=None, ipython=True, user_options={}):
super(SequanixGUI, self).__init__(parent=parent)
colorlog.getLogger().setLevel("INFO")
colorlog.info("Welcome to Sequana GUI (aka Sequanix)")
self._tempdir = QTemporaryDir()
self.shell = ""
self.shell_error = ""
self._colors = {
"green": QtGui.QColor(0, 170, 0),
"red": QtGui.QColor(170, 0, 0),
"orange": QtGui.QColor(170, 150, 0),
"blue": QtGui.QColor(0, 90, 154),
}
# some global attributes
self._undefined_section = "Parameters in no sections/rules"
# self._config = None
        # Set the regex to catch steps in the progress bar
self._step_regex = re.compile("([0-9]+) of ([0-9]+) steps")
self._ipython_tab = ipython
self.initUI()
self.read_settings()
# this should be after initUI and read_settings
self.set_style_sheet()
# User option.
def isset(options, key):
if key in options and getattr(options, key):
return True
else:
return False
if isset(user_options, "wkdir"):
self.info("Setting working directory using user's argument %s" % user_options.wkdir)
if os.path.exists(user_options.wkdir) is False:
easydev.mkdirs(user_options.wkdir)
# We must use the absolute path
abspath = os.path.abspath(user_options.wkdir)
self.sequana_factory._directory_browser.set_filenames(abspath)
self.generic_factory._directory_browser.set_filenames(abspath)
if isset(user_options, "snakefile"):
filename = user_options.snakefile
if os.path.exists(filename) is True:
self.info("Setting snakefile using user's argument %s" % user_options.snakefile)
self.generic_factory._snakefile_browser.set_filenames(filename)
else:
self.error("%s does not exist" % filename)
self.ui.tabs_pipeline.setCurrentIndex(1)
if isset(user_options, "configfile"):
filename = user_options.configfile
if os.path.exists(filename) is True:
self.info("Setting config file using user's argument %s" % user_options.configfile)
self.generic_factory._config_browser.set_filenames(filename)
self.ui.tabs_pipeline.setCurrentIndex(1)
if isset(user_options, "pipeline"): # pragma: no cover
self.info("Setting Sequana pipeline %s " % user_options.pipeline)
pipelines = self.sequana_factory.valid_pipelines
if user_options.pipeline in pipelines:
index = self.ui.choice_button.findText(user_options.pipeline)
self.ui.choice_button.setCurrentIndex(index)
# set focus on pipeline tab
self.ui.tabs_pipeline.setCurrentIndex(0)
else:
self.error("unknown pipeline. Use one of %s " % pipelines)
if isset(user_options, "input_directory"): # pragma: no cover
directory = user_options.input_directory
self.info("Setting Sequana input directory")
if directory and os.path.exists(directory) is False:
self.warning("%s does not exist" % directory)
elif directory:
abspath = os.path.abspath(user_options.input_directory)
self.sequana_factory._sequana_directory_tab.set_filenames(abspath)
self.ui.tabs_pipeline.setCurrentIndex(0)
self.ui.tabWidget.setCurrentIndex(0)
if isset(user_options, "input_files"):
directory = user_options.input_files
self.info("Setting Sequana input files")
dirtab = self.sequana_factory._sequana_paired_tab
dirtab._set_paired_filenames([os.path.abspath(f) for f in user_options.input_files])
self.ui.tabs_pipeline.setCurrentIndex(0)
self.ui.tabWidget.setCurrentIndex(1)
if isset(user_options, "sequana_configfile"):
cfg = user_options.sequana_configfile
self.info("Replace Sequana config file")
self.menuImportConfig(cfg)
if isset(user_options, "schemafile"):
schemafile = user_options.schemafile
self.info("Set the schema file")
self.menuImportSchema(schemafile)
# We may have set some pipeline, snakefile, working directory
self.create_base_form()
self.fill_until_starting()
def initUI(self):
# The logger is not yet set, so we use the module directly
colorlog.info("Initialising GUI")
# Set up the user interface from Designer. This is the general layout
# without dedicated widgets and connections
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
# 2 more dialogs from designer
self.preferences_dialog = PreferencesDialog(self)
self.snakemake_dialog = SnakemakeDialog(self)
self.preferences_dialog.ui.buttonBox.accepted.connect(self.set_level)
# The IPython dialog, which is very useful for debugging
if self._ipython_tab is True:
self.ipyConsole = QIPythonWidget(
customBanner="Welcome to Sequanix embedded ipython console\n"
+ "The entire GUI interface is stored in the variable gui\n"
+ "Note also that you can use this interface as a shell \n"
+ "command line interface preceding your command with ! character\n"
)
# self.ipyConsole.printText("The variable 'foo' andion.")
self.ipyConsole.execute("from sequana import *")
self.ipyConsole.execute("import sequana")
self.ipyConsole.execute("")
self.ipyConsole.pushVariables({"gui": self})
self.ui.layout_ipython.addWidget(self.ipyConsole)
# layout for config file parameters
widget_form = QW.QWidget()
self.form = QW.QVBoxLayout(widget_form)
self.form.setSpacing(10)
self.ui.scrollArea.setWidget(widget_form)
self.ui.scrollArea.setWidgetResizable(True)
self.ui.scrollArea.setMinimumHeight(200)
# layout for the snakemake output
self.output = QW.QTextEdit()
self.output.setReadOnly(True)
self.ui.layout_snakemake.addWidget(self.output)
# Add the new logging box widget to the layout
self.logTextBox = QPlainTextEditLogger(self)
self.logTextBox.setFormatter(
colorlog.ColoredFormatter(
"%(log_color)s%(asctime)s - %(levelname)s - %(message)s",
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red,bg_white",
},
)
)
colorlog.getLogger().addHandler(self.logTextBox)
self.set_level()
self.ui.layout_logger.addWidget(self.logTextBox.widget)
# Connectors to actions related to the menu bar
self.ui.actionQuit.triggered.connect(self.menuQuit)
self.ui.action_import_configfile.triggered.connect(self.menuImportConfig)
self.ui.action_import_schemafile.triggered.connect(self.menuImportSchema)
self.ui.actionHelp.triggered.connect(self.menuHelp)
self.ui.actionAbout.triggered.connect(self.menuAbout)
self.ui.actionSnakemake.triggered.connect(self.snakemake_dialog.exec_)
self.ui.actionPreferences.triggered.connect(self.preferences_dialog.exec_)
self.preferences_dialog.ui.preferences_options_general_tooltip_value.clicked.connect(self.set_style_sheet)
# connectors related to the pipeline tabs (pipeline/generic)
self.set_sequana_pipeline()
self.set_generic_pipeline()
# The run/save/dag footer buttons
self.connect_footer_buttons()
self.process = QtCore.QProcess(self)
self.process.started.connect(lambda: self.ui.run_btn.setEnabled(False))
self.process.started.connect(lambda: self.ui.stop_btn.setEnabled(True))
self.process.started.connect(lambda: self.ui.unlock_btn.setEnabled(False))
        self.process.started.connect(self.start_progress)
self.process.started.connect(lambda: self.ui.save_btn.setEnabled(False))
self.process.started.connect(lambda: self.ui.tabs_pipeline.setEnabled(False))
self.process.finished.connect(lambda: self.ui.run_btn.setEnabled(True))
self.process.finished.connect(lambda: self.ui.stop_btn.setEnabled(False))
self.process.finished.connect(lambda: self.ui.unlock_btn.setEnabled(True))
self.process.finished.connect(lambda: self.ui.save_btn.setEnabled(True))
self.process.finished.connect(lambda: self.ui.tabs_pipeline.setEnabled(True))
self.process.finished.connect(self.end_run)
self.process.readyReadStandardOutput.connect(self.snakemake_data_stdout)
self.process.readyReadStandardError.connect(self.snakemake_data_error)
# This is for the show dag btn. Created here once for all
self.process1 = QtCore.QProcess(self)
self.process2 = QtCore.QProcess(self)
self.ui.tabWidget.currentChanged.connect(lambda: self.ui.run_btn.setEnabled(False))
# if we are on one of those clusters, switch to the cluster choice in
# the pipeline control combo box
if on_cluster() is True:
self.ui.comboBox_local.setCurrentText("cluster")
# connect show advanced button with the until/starting frame
self.ui.show_advanced_control.clicked.connect(self.click_advanced)
self.ui.frame_control.hide()
def _get_opacity(self):
dialog = self.preferences_dialog
box = dialog.ui.preferences_options_general_tooltip_value
if box.isChecked():
return 255
else:
return 0
tooltip_opacity = property(_get_opacity)
def set_style_sheet(self):
self.setStyleSheet(
"""QToolTip {
background-color: #aabbcc;
color: black;
border-style: double;
border-width: 3px;
border-color: green;
border-radius: 5px;
margin:5px;
opacity: %s;
} ;
"""
% self.tooltip_opacity
)
# |-----------------------------------------------------|
# | MENU related |
# |-----------------------------------------------------|
def menuImportConfig(self, configfile=None): # pragma: no cover
# The connector send a False signal but default is None
# so we need to handle the two cases
if self.snakefile is None:
self.error("You must set a pipeline first")
msg = WarningMessage(("You must set a pipeline first"))
msg.exec_()
return
if configfile and os.path.exists(configfile) is False:
self.error("Config file (%s) does not exists" % configfile)
return
if configfile is None or configfile is False:
self.info("Importing config file.")
file_filter = "YAML file (*.json *.yaml *.yml)"
browser = FileBrowser(file_filter=file_filter)
browser.browse_file()
configfile = browser.paths
if configfile:
self.sequana_factory._imported_config = configfile
else:
self.sequana_factory._imported_config = None
self.create_base_form()
def menuImportSchema(self, schemafile=None): # pragma: no cover
if schemafile:
self.generic_factory._schema = schemafile
return
self.info("Importing YAML schema file.")
file_filter = "YAML file (*.yaml *.yml)"
browser = FileBrowser(file_filter=file_filter)
browser.browse_file()
schemafile = browser.paths
if schemafile:
self.generic_factory._schema = schemafile
else:
self.generic_factory._schema = None
def menuAbout(self):
from sequana import version
url = "sequana.readthedocs.io"
widget = About()
widget.setText("Sequana version %s " % version)
widget.setInformativeText(
"""
Online documentation on <a href="http://%(url)s">%(url)s</a>
<br>
<br>
Authors: Thomas Cokelaer and Dimitri Desvillechabrol, 2017-2018
"""
% {"url": url}
)
widget.setWindowTitle("Sequana")
# widget.setStandardButtons(QW.QMessageBox.Ok)
retval = widget.exec_()
if retval == QW.QMessageBox.Ok:
widget.close()
def menuHelp(self):
url = "sequana.readthedocs.io"
pipelines_text = "<ul>\n"
url = "http://sequana.readthedocs.io/en/master"
for pipeline in snaketools.pipeline_names:
pipelines_text += ' <li><a href="%(url)s/pipeline_%(name)s.html">%(name)s</a></li>\n' % {
"url": url,
"name": pipeline,
}
pipelines_text += "</ul>"
msg = HelpDialog(pipelines=pipelines_text)
retval = msg.exec_()
if retval == QW.QMessageBox.Ok:
msg.close()
def menuQuit(self):
self._quit_msg = WarningMessage("Do you really want to quit ?")
self._quit_msg.setStandardButtons(QW.QMessageBox.Yes | QW.QMessageBox.No)
self._quit_msg.setDefaultButton(QW.QMessageBox.No)
quit_answer = self._quit_msg.exec_()
if quit_answer == QW.QMessageBox.Yes:
self.close()
def set_level(self):
# Set the level of the logging system
pref = self.preferences_dialog.ui
level = pref.preferences_options_general_logging_value.currentText()
#level = getattr(colorlog.logging.logging, level)
level = colorlog.getLogger().level
colorlog.getLogger().setLevel(level)
# ---------------------------------------------------------------
# More GUI / reading the snakefile (sequana or generic)
# ---------------------------------------------------------------
def set_sequana_pipeline(self):
# The pipeline connectors
pipelines = sorted(snaketools.pipeline_names)
pipelines = [this for this in pipelines if this not in self._to_exclude]
self.ui.choice_button.addItems(pipelines)
self.ui.choice_button.activated[str].connect(self._update_sequana)
# FIXME do we want to use this ?
self.ui.choice_button.installEventFilter(self)
# populate the factory with the choice button
self.sequana_factory = SequanaFactory(combobox=self.ui.choice_button, run_button=self.ui.run_btn)
self.sequana_factory.valid_pipelines = pipelines
# a local alias
saf = self.sequana_factory
# add widgets for the working dir
self.ui.layout_sequana_wkdir.addWidget(saf._directory_browser)
# add widget for the input sample
# self.ui.layout_sequana_input_files.addWidget(saf._sequana_paired_tab)
# hlayout = QW.QHBoxLayout()
# hlayout.addWidget(saf._sequana_readtag_label2)
# hlayout.addWidget(saf._sequana_readtag_lineedit2)
# self.ui.layout_sequana_input_files.addLayout(hlayout)
# add widget for the input directory
self.ui.layout_sequana_input_dir.addWidget(saf._sequana_directory_tab)
hlayout = QW.QHBoxLayout()
hlayout.addWidget(saf._sequana_readtag_label)
hlayout.addWidget(saf._sequana_readtag_lineedit)
self.ui.layout_sequana_input_dir.addLayout(hlayout)
hlayout = QW.QHBoxLayout()
hlayout.addWidget(saf._sequana_pattern_label)
hlayout.addWidget(saf._sequana_pattern_lineedit)
self.ui.layout_sequana_input_dir.addLayout(hlayout)
@QtCore.pyqtSlot(str)
def _update_sequana(self, index):
"""Change options form when user changes the pipeline."""
if self.ui.choice_button.findText(index) == 0:
self.clear_form()
self.rule_list = []
self.fill_until_starting()
return
self.info("Reading sequana %s pipeline" % index)
self.create_base_form()
# Is there a cluster config file ?
dialog = self.snakemake_dialog.ui
if self.sequana_factory.clusterconfigfile:
dialog.snakemake_options_cluster_cluster__config_value.set_filenames(self.sequana_factory.clusterconfigfile)
else:
dialog.snakemake_options_cluster_cluster__config_value.set_filenames("")
self.fill_until_starting()
self.switch_off()
# Reset imported config file in SequanaFactory
self.sequana_factory._imported_config = None
def set_generic_pipeline(self):
self.generic_factory = GenericFactory(self.ui.run_btn)
gaf = self.generic_factory
# The config file connectors
gaf._config_browser.clicked_connect(self.create_base_form)
# Update the main UI with
self.ui.layout_generic_snakefile.addWidget(gaf._snakefile_browser)
self.ui.layout_generic_config.addWidget(gaf._config_browser)
self.ui.layout_generic_wkdir.addWidget(gaf._directory_browser)
# When user press the cancel button, the config file browser is reset
self.ui.cancel_push_button.clicked.connect(self.generic_factory._config_browser.set_empty_path)
# ---------------------------------------------------------------------
# Footer connectors
# ---------------------------------------------------------------------
def connect_footer_buttons(self):
self.ui.run_btn.setEnabled(False)
self.ui.run_btn.clicked.connect(self.click_run)
self.ui.stop_btn.clicked.connect(self.click_stop)
self.ui.stop_btn.setEnabled(False)
self.ui.unlock_btn.clicked.connect(self.ui.run_btn.setEnabled)
self.ui.unlock_btn.clicked.connect(self.unlock_snakemake)
self.ui.unlock_btn.setEnabled(True)
self.ui.report_btn.setEnabled(True)
self.ui.report_btn.clicked.connect(self.open_report)
self.ui.save_btn.clicked.connect(self.save_project)
self.ui.dag_btn.setEnabled(False)
self.ui.dag_btn.clicked.connect(self.show_dag)
# -----------------------------------------------------------------
# function to link to the factory (sequana or generic)
# -----------------------------------------------------------------
def _get_mode(self):
# figure out if we are dealing with a sequana pipeline or not
index = self.ui.tabs_pipeline.currentIndex()
if index == 0:
return "sequana"
elif index == 1:
return "generic"
mode = property(_get_mode)
def _get_factory(self):
return getattr(self, "%s_factory" % self.mode)
factory = property(_get_factory)
def _get_config(self):
return getattr(self, "%s_factory" % self.mode).config
config = property(_get_config)
def _get_configfile(self):
return getattr(self, "%s_factory" % self.mode).configfile
configfile = property(_get_configfile)
def _get_snakefile(self):
return getattr(self, "%s_factory" % self.mode).snakefile
snakefile = property(_get_snakefile)
def _get_working_dir(self):
return getattr(self, "%s_factory" % self.mode).directory
working_dir = property(_get_working_dir)
# ----------------------------------------------------------------------
# Snakemake related (config, running)
# ----------------------------------------------------------------------
def fill_until_starting(self):
active_list = [w.get_name() for w in self.rule_list if w.get_do_rule()]
self.ui.until_box.clear()
self.ui.until_box.addItems([None] + active_list)
self.ui.starting_box.clear()
self.ui.starting_box.addItems([None] + active_list)
# ----------------------------------------------------------
# Config file related
# ---------------------------------------------------------
def _set_focus_on_config_tab(self):
# Set focus on config file
if self._ipython_tab:
self.ui.tabs.setCurrentIndex(3)
else:
self.ui.tabs.setCurrentIndex(2)
# --------------------------------------------------------------------
# Advanced control
# --------------------------------------------------------------------
def click_advanced(self):
if self.ui.frame_control.isHidden():
self.ui.frame_control.show()
else:
self.ui.frame_control.hide()
# --------------------------------------------------------------------
# Others
# --------------------------------------------------------------------
def clear_layout(self, layout):
"""Clean all widgets contained in a layout."""
while layout.count():
child = layout.takeAt(0)
if child.widget() is not None:
child.widget().deleteLater()
elif child.layout() is not None:
self.clear_layout(child.layout())
# --------------------------------------------------------------------
# Running snakemake
# --------------------------------------------------------------------
def _clean_line(self, line):
# TODO: surely there is a better way to do that and not overlap
# with tools.py ...
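        # The incoming lines are string reprs of bytes chunks (they may start with
        # b' or b" and carry literal \r / \t escape sequences); this strips those
        # artifacts so only the readable text is kept.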
line = line.replace("b'\\r'", "")
line = line.replace("b'\r'", "")
line = line.replace("b'\\r '", "")
line = line.replace("b'\r '", "")
line = line.replace("b' '", "")
line = line.replace("\\t", " " * 4)
line = line.replace("'b'", "")
for this in ["b'", 'b"', "\r"]:
if line.startswith(this):
line = line.replace(this, "")
if line.startswith('b"'):
line = line.replace('b"', "")
line = line.rstrip("\\x1b[0m")
line = line.replace("\\x1b[33m", "")
return line
def snakemake_data_stdout(self): # pragma: no cover
"""Read standard output of snakemake process"""
data = str(self.process.readAllStandardOutput())
self.shell += data
self.update_progress_bar(data)
for this in data.split("\\n"):
line = this.strip()
if line and len(line) > 3 and "complete in" not in line: # prevent all b'' strings
line = self._clean_line(line)
if len(line.strip()) == 0:
continue
self.output.append('<font style="color:blue">' + line + "</font>")
def snakemake_data_error(self):
"""Read error output of snakemake process"""
error = str(self.process.readAllStandardError())
self.shell_error += error
self.update_progress_bar(error)
for this in error.split("\\n"):
line = this.strip()
if line and len(line) > 3 and "complete in" not in line: # prevent all b'' strings
line = self._clean_line(line)
if line.startswith("b'"):
line = line[2:]
line.rstrip("'")
grouprex = self._step_regex.findall(line)
if grouprex:
self.output.append('<font style="color:orange">' + line + "</font>")
elif "Error" in line:
self.output.append('<font style="color:red">' + line + "</font>")
else:
self.output.append('<font style="color:green">' + line + "</font>")
def get_until_starting_option(self):
"""Return list with starting rule and end rule."""
until_rule = self.ui.until_box.currentText()
starting_rule = self.ui.starting_box.currentText()
option = []
if until_rule:
option += ["--no-hooks", "-U", until_rule]
if starting_rule:
option += ["-R", starting_rule]
return option
def _get_snakemake_command(self, snakefile): # pragma: no cover
"""If the cluster option is selected, then the cluster field in
the snakemake menu must be set to a string different from empty string.
If we are on TARS, we also must set the option to cluster (not local)
If one of the previous cases is true, this function returns None
"""
dialog = self.snakemake_dialog # an alias
snakemake_line = ["-s", snakefile, "--stat", "stats.txt", "-p"]
if self.ui.comboBox_local.currentText() == "local":
if on_cluster():
msg = WarningMessage(
(
"You are on TARS cluster. Please set the"
"batch options and select the cluster option (not local)"
)
)
msg.exec_()
return None
snakemake_line += dialog.get_snakemake_local_options()
elif self.ui.comboBox_local.currentText() == "cluster":
cluster = dialog.ui.snakemake_options_cluster_cluster_value.text()
if len(cluster.strip()) == 0:
msg = WarningMessage(
(
"You selected a 'cluster run' but the "
"cluster preferences are not set. Either switch to a local "
"run or set a correct string in the Snakemake options menu "
"(in cluster tab/ cluster field.)"
)
)
msg.exec_()
return None
snakemake_line += dialog.get_snakemake_cluster_options()
# cluster_config = dialog.ui.snakemake_options_cluster_config_value.text()
# cluster_config = cluster_config.strip()
# if len(cluster_config):
# snakemake_line += ["--cluster-config", cluster_config]
snakemake_line += dialog.get_snakemake_general_options()
snakemake_line += self.get_until_starting_option()
others = self.snakemake_dialog.ui.snakemake_options_general_custom.text()
if others.strip():
snakemake_line += others.split()
if self.configfile:
configfile = os.path.basename(self.configfile)
snakemake_line += ["--configfile", configfile]
return snakemake_line
def _set_pb_color(self, color):
self.ui.progressBar.setStyleSheet(
"""
QProgressBar {{
color: black;
border: 2px solid grey;
margin: 2px;
border-radius: 5px;
text-align: center;
}}
QProgressBar::chunk {{
background: {};
}}""".format(
color
)
)
# pal = self.ui.progressBar.palette()
# pal.setColor(QtGui.QPalette.Highlight, self._colors['blue'])
# self.ui.progressBar.setPalette(pal)
def click_run(self):
# set focus on the snakemake output
if self.snakefile is None or self.working_dir is None:
self.warning("Working directory or snakefile not set.")
return
self.ui.tabs.setCurrentIndex(0)
self.shell_error = ""
self.shell = ""
# Prepare the command and working directory.
if self.working_dir is None:
self.warning("Set the working directory first")
return
        # We copy the sequana or generic snakefile into a file called
        # Snakefile
snakefile = self.working_dir + os.sep + os.path.basename(self.snakefile)
if os.path.exists(snakefile) is False:
self.critical("%s does not exist" % snakefile)
return
snakemake_args = self._get_snakemake_command(snakefile)
if snakemake_args is None:
return
# the progress bar
self._set_pb_color(self._colors["blue"].name())
self.ui.progressBar.setValue(1)
# Start process
# If an argument contains spaces, we should use quotes. However,
# with PyQt quotes must be escaped
args = []
for this in snakemake_args:
if re.search(r"\s", this) is True:
args.append('"%s"' % this)
else:
args.append(this)
snakemake_args = args
self.info("Starting process with snakemake %s " % " ".join(snakemake_args))
self.output.clear()
self.process.setWorkingDirectory(self.working_dir)
self.process.start("snakemake", snakemake_args)
# -------------------------------------------------------------------
# Create the base form
# -------------------------------------------------------------------
def create_base_form(self):
"""Create form with all options necessary for a pipeline.
::
########################################################
            # valid python docstring to be interpreted by sphinx
#
# section:
# item1: 10
# item2: 20
"""
self.rule_list = []
if self.config is None:
self.clear_form()
return
self.info("Creating form based on config file")
self.clear_form()
rules_list = list(self.config._yaml_code.keys())
# We do not sort the list of rules anymore so that it is like in the
# config file
# rules_list.sort()
self.necessary_dict = {}
        # For each section, we create a widget (RuleForm). For instance, the first
        # one is accessible as follows: gui.form.itemAt(0).widget()
docparser = YamlDocParser(self.configfile)
import ruamel.yaml.comments
for count, rule in enumerate(rules_list):
self.debug("Scanning rule %s" % rule)
            # Check if this is a dictionary
contains = self.config._yaml_code[rule]
# If this is a section/dictionary, create a section
if isinstance(contains, (ruamel.yaml.comments.CommentedMap, dict)) and (
rule not in SequanixGUI._not_a_rule
):
# Get the docstring from the Yaml section/rule
docstring = docparser._block2docstring(rule)
# Get any special keywords
specials = docparser._get_specials(rule)
# self.ui.preferences_options_general_addbrowser_value
dialog = self.preferences_dialog.ui
option = dialog.preferences_options_general_addbrowser_value.text()
option = option.strip()
option = option.replace(";", " ").replace(",", " ")
if len(option):
keywords = option.split()
else:
keywords = []
keywords += self._browser_keywords
keywords = list(set(keywords))
rule_box = Ruleform(rule, contains, count, keywords, specials=specials)
rule_box.connect_all_option(lambda: self.ui.run_btn.setEnabled(False))
# Try to interpret it with sphinx
try:
self.debug("parsing docstring of %s" % rule)
comments = rest2html(docstring).decode()
rule_box.setToolTip(comments)
except Exception as err:
print(err)
self.warning("Could not interpret docstring of %s" % rule)
rule_box.setToolTip("")
self.form.addWidget(rule_box)
self.rule_list.append(rule_box)
rule_box.connect_do(self.fill_until_starting)
else:
# this is a parameter in a section, which may be
# a list, a None or something else
if isinstance(contains, list):
self.necessary_dict = dict(self.necessary_dict, **{rule: contains})
elif contains is None or contains in ["''", '""']:
self.necessary_dict = dict(self.necessary_dict, **{rule: None})
else:
self.necessary_dict = dict(self.necessary_dict, **{rule: "{0}".format(contains)})
# if this is a generic pipeline, you may have parameters outside of a
# section
if self.mode == "generic" and len(self.necessary_dict):
rule_box = Ruleform(self._undefined_section, self.necessary_dict, -1, generic=True)
self.form.addWidget(rule_box)
self._set_focus_on_config_tab()
# ----------------------------------------------------------
# STOP footer button
# ----------------------------------------------------------
def click_stop(self):
"""The stop button"""
self._set_pb_color(self._colors["orange"].name())
# For windows:
# http://stackoverflow.com/questions/8232544/how-to-terminate-a-process-without-os-kill-osgeo4w-python-2-5
if self.process.state() != 0:
pid = self.process.pid()
self.warning("Process {} running , stopping it... ".format(pid))
# We must use a ctrl+C interruption so that snakemake
# handles the interruption smoothly. However, child processes
# are lost so we also need to get their IDs and kill them.
self.info("killing the main snakemake process. This may take a few seconds ")
try:
self.info("process pid={} being killed".format(self.process.pid()))
pid_children = [this.pid for this in psutil.Process(pid).children(recursive=True)]
# Kills the main process
os.kill(pid, signal.SIGINT)
# And the children
for this in pid_children: # pragma: no cover
self.info("Remove pid {} ".format(this))
try:
os.kill(this, signal.SIGINT)
except Exception as err:
print(err)
time.sleep(4)
except Exception as err:
print(err)
pass # already stopped ?
self.info("Process killed successfully.")
self.ui.save_btn.setEnabled(True)
self.ui.run_btn.setEnabled(True)
self.ui.stop_btn.setEnabled(False)
self.ui.tabs_pipeline.setEnabled(True)
# --------------------------------------------------------------------
# Progress bar
# --------------------------------------------------------------------
def update_progress_bar(self, line):
"""Parse with a regex to retrieve current step and total step."""
grouprex = self._step_regex.findall(line)
# Use last "x of y" (not the first item at position 0)
if grouprex:
step = int(grouprex[-1][0]) / float(grouprex[-1][1]) * 100
self.ui.progressBar.setValue(step)
if "Nothing to be done" in line:
self.ui.progressBar.setValue(100)
def start_progress(self):
self.ui.progressBar.setRange(0, 1)
def end_run(self): # pragma: no cover
if self.ui.progressBar.value() >= 100:
self._set_pb_color(self._colors["green"].name())
self.info("Run done. Status: successful")
else:
self._set_pb_color(self._colors["red"].name())
text = "Run manually to check the exact error or check the log."
if "--unlock" in self.shell_error:
text += "<br>You may need to unlock the directory. "
text += "click on Unlock button"
self.critical(text)
return
def _get_force(self):
dialog = self.preferences_dialog
box = dialog.ui.preferences_options_general_overwrite_value
return box.isChecked()
def _set_force(self, boolean): # pragma: no cover
assert boolean in [True, False]
dialog = self.preferences_dialog
box = dialog.ui.preferences_options_general_overwrite_value
box.setChecked(boolean)
force = property(_get_force, _set_force)
def save_project(self): # pragma: no cover
self.info("Saving project")
if self.configfile is None:
if self.mode == "generic":
if self.generic_factory.is_runnable():
self.critical("save_project: Generic case without config file")
self._save_teardown()
else:
msg = WarningMessage("You must select a Snakefile and a working directory.")
msg.exec_()
elif self.mode == "sequana":
msg = WarningMessage("You must choose a pipeline first.")
msg.exec_()
return
if self.working_dir is None:
self.critical("save_project: no working dir: return")
msg = WarningMessage("You must select a working directory first.")
msg.exec_()
return
try:
form_dict = dict(self.create_form_dict(self.form), **self.necessary_dict)
except AttributeError as err:
self.error(err)
msg = WarningMessage("You must choose a pipeline before saving.")
msg.exec_()
return
# get samples names or input_directory
if self.mode == "sequana":
self.info("Sequana case")
flag1 = self.sequana_factory._sequana_directory_tab.get_filenames()
flag2 = self.sequana_factory._sequana_paired_tab.get_filenames()
if (
self.ui.tabWidget.currentIndex() == 0
and len(flag1) == 0
or self.ui.tabWidget.currentIndex() == 1
and len(flag2) == 0
):
msg = WarningMessage("You must choose an input first.")
msg.exec_()
return
filename = self.sequana_factory._sequana_directory_tab.get_filenames()
form_dict["input_directory"] = filename
# If pattern provided, the input_directory is reset but used in
# the pattern as the basename
pattern = self.sequana_factory._sequana_pattern_lineedit.text()
if len(pattern.strip()):
form_dict["input_pattern"] = filename
form_dict["input_pattern"] += os.sep + pattern.strip()
form_dict["input_directory"] = ""
readtag = self.sequana_factory._sequana_readtag_lineedit.text()
if len(readtag.strip()):
form_dict["input_readtag"] = readtag
else:
form_dict["input_readtag"] = "_R[12]_"
elif self.mode == "generic":
# Here we save the undefined section in the form.
if self._undefined_section in form_dict.keys():
for key, value in form_dict[self._undefined_section].items():
form_dict[key] = value
del form_dict[self._undefined_section]
self.info("Generic case")
# Let us update the attribute with the content of the form
# This uses the user's information
cfg = self.config
cfg.config.update(form_dict)
cfg._update_yaml()
self.cfg = cfg
pref = self.preferences_dialog.ui
box = pref.preferences_options_general_schema_value
checked_schema = box.isChecked()
if self.working_dir:
# Save the configuration file
if self.mode == "sequana":
yaml_path = self.working_dir + os.sep + "config.yaml"
self.warning("copy requirements (if any)")
cfg.copy_requirements(target=self.working_dir)
elif self.mode == "generic":
yaml_path = os.sep.join((self.working_dir, os.path.basename(self.generic_factory.configfile)))
if os.path.isfile(yaml_path) and self.force is False:
save_msg = WarningMessage("The file <i>{0}</i> already exist".format(yaml_path))
save_msg.setInformativeText("Do you want to overwrite the file?")
save_msg.setStandardButtons(QW.QMessageBox.Yes | QW.QMessageBox.Discard | QW.QMessageBox.Cancel)
save_msg.setDefaultButton(QW.QMessageBox.Yes)
# Yes == 16384
# Save == 2048
retval = save_msg.exec_()
if retval in [16384, 2048]:
self.info("Saving config file (exist already)")
if checked_schema is False:
cfg.save(yaml_path)
else:
ret = self._check_and_save_config(cfg, yaml_path)
if ret is False:
# we do not want to save the config file and call
# _save_teardown
return
else:
self.warning("Saving config file (does not exist)")
if checked_schema is False:
cfg.save(yaml_path)
else:
ret = self._check_and_save_config(cfg, yaml_path)
if ret is False:
# we do not want to save the config file and call
# _save_teardown
return
# Save the configuration file for the cluster
if self.mode == "sequana" and self.sequana_factory.clusterconfigfile:
target = os.sep.join((self.working_dir, "cluster_config.json"))
shutil.copy(self.sequana_factory.clusterconfigfile, target)
# replace the name of the original file with the target one so
# that the target can be edited. The target will also be used in
                # place of the original version when launching snakemake!
self.snakemake_dialog.ui.snakemake_options_cluster_cluster__config_value.set_filenames(target)
# Save the multiqc_config file if provided in sequana pipeline
if self.mode == "sequana" and self.sequana_factory.multiqcconfigfile:
target = self.working_dir + os.sep + "multiqc_config.yaml"
shutil.copy(self.sequana_factory.multiqcconfigfile, target)
else:
self.critical("Config file not saved (no wkdir)")
msg = WarningMessage("You must set a working directory", self)
msg.exec_()
self.switch_off()
return
self._save_teardown()
def _save_teardown(self):
# Finally, save project and update footer run button
self.factory._copy_snakefile(self.force)
self.debug("Switching RUN and DAG button on")
self.ui.run_btn.setEnabled(True)
self.ui.dag_btn.setEnabled(True)
def _check_and_save_config(self, cfg, yaml_path):
# Here we save the config.yaml file when changed
# However, before that if there is a schema, we can
# use it. This is the case for some sequana pipelines
# return False if the config is invalid and do not save it
if self.mode == "sequana" and self.sequana_factory.schemafile is None:
self.warning("No Schema found to validate the config file")
if self.mode == "sequana" and self.sequana_factory.schemafile:
schemafile = self.sequana_factory.schemafile
elif self.mode == "generic" and self.generic_factory.schemafile:
schemafile = self.generic_factory.schemafile
else:
schemafile = None
if schemafile:
# check that the config file is correct before saving it
# only if we have a schema_config file.
self.info("Checking config file with provided schema file.")
# We save the config as a dummy temporary file to check it
            # if correct, we then save the file. If not, we provide a help
# message
from easydev import TempFile
with TempFile(suffix=".yaml") as fout:
# save a temporary version
cfg.save(fout.name)
import ruamel
import warnings
from pykwalify.core import Core
# causes issue with ruamel.yaml 0.12.13. Works for 0.15
try:
warnings.simplefilter("ignore", ruamel.yaml.error.UnsafeLoaderWarning)
except:
pass
try:
# open the config and the schema file
c = Core(source_file=fout.name, schema_files=[schemafile])
except Exception as err:
print(err)
return False
try:
c.validate()
except Exception as err:
print(err)
error_msg = "<b>CRITICAL: INVALID CONFIGURATION FILE</b>\n"
error_msg += "<pre>" + str(err) + "</pre>"
self.critical(error_msg)
self.switch_off()
msg = WarningMessage(error_msg, self)
msg.exec_()
return False
cfg.save(yaml_path)
def switch_off(self):
self.debug("Switching RUN and DAG button off")
self.ui.run_btn.setEnabled(False)
self.ui.dag_btn.setEnabled(False)
def _reset_schema(self):
self.schemafile = None
# -----------------------------------------------------------------------
# SAVE LOG in a files
# -----------------------------------------------------------------------
def report_issues(self, filename="issue_debug.txt"):
# save shell + shell_error in working directory as well as snakemake and
# config file.
with open(filename, "w") as fh:
fh.write("\nsequanix logger ----------------------------------\n")
try:
file_logger = self.save_logger()
with open(file_logger, "r") as fin:
fh.write(fin.read())
except:
pass
fh.write("\nsequanix shell ----------------------------------\n")
try:
fh.writelines(self.shell)
except:
fh.write("No shell info")
fh.write("\nsequanix shell error ------------------------------\n")
try:
fh.writelines(self.shell_error)
except:
fh.write("No shell error info")
url = "https://github.com/sequana/sequana/issues "
print("Created a file called {} to be posted on {}.".format(filename, url))
self.init_logger()
# -----------------------------------------------------------------------
# UNLOCK footer button
# -----------------------------------------------------------------------
def unlock_snakemake(self):
if self.working_dir is None or self.snakefile is None:
self.warning("working directory or snakefile not set")
return
# FIXME this does not work as expected
self.ui.run_btn.setEnabled(False)
if os.path.exists(self.snakefile) is False:
self.warning("snakefile not found. should not happen")
return
self.cmd = ["snakemake", "-s", self.snakefile, "--unlock"]
self.info("Running " + " ".join(self.cmd))
self.info("Please wait a second. Unlocking working directory")
# focus on tab with snakemake output
self.ui.tabs.setCurrentIndex(0)
self.ui.tabs_pipeline.setEnabled(False)
try:
snakemake_proc = sp.Popen(self.cmd, cwd=self.working_dir)
snakemake_proc.wait()
except:
self.critical("Issue while unlocking the directory")
finally:
self.ui.tabs_pipeline.setEnabled(True)
self.info("unlocking done")
self.output.append('<font style="color:brown">Unlocking working directory</font>')
self.ui.run_btn.setEnabled(True)
self.ui.stop_btn.setEnabled(False)
# -----------------------------------------------------------------------
# DAG footer button
# -----------------------------------------------------------------------
def show_dag(self): # pragma: no cover
try:
            # This command should work on various platforms, just in case
# we add a try/except
if easydev.cmd_exists("dot") is False:
msg = "**dot** command not found. Use 'conda install graphviz' to install it."
self.warning(msg)
msg = WarningMessage((msg))
msg.exec_()
return
except:
pass
finally:
self.info("Creating DAG image.")
if self.snakefile is None:
self.warning("No snakefile")
return
# We just need the basename because we will run it in the wkdir
snakefile = os.path.basename(self.snakefile)
snakemake_line = ["snakemake", "-s", snakefile]
snakemake_line += ["--rulegraph"]
if self.mode == "generic" and self.configfile:
# make sure to copy the config file
snakemake_line += ["--configfile"]
snakemake_line += [os.path.basename(self.generic_factory.configfile)]
snakemake_line += self.get_until_starting_option()
# Where to save the SVG (temp directory)
svg_filename = self._tempdir.path() + os.sep + "test.svg"
self.info(snakemake_line)
self.process1.setWorkingDirectory(self.working_dir)
self.process1.setStandardOutputProcess(self.process2)
self.process1.start("snakemake", snakemake_line[1:])
self.process2.start("dot", ["-Tsvg", "-o", svg_filename])
self.process1.waitForFinished(50000)
self.process2.waitForFinished(50000)
if os.path.exists(svg_filename):
self.diag = SVGDialog(svg_filename)
self.diag.show()
else:
msg = "Could not create the DAG file."
error = str(self.process1.readAllStandardError())
msg = CriticalMessage(msg, error)
msg.exec_()
return
def open_report(self):
pref = self.preferences_dialog.ui
filename = pref.preferences_options_general_htmlpage_value.text()
if filename == "":
filename = QW.QFileDialog.getOpenFileNames(
self, "Select your HTML report", self.working_dir, "HTML files (*.html)"
)[0]
if len(filename) and os.path.exists(filename[0]):
filename = filename[0]
else:
self.warning("No valid HTML selected and none specified in the preferences.")
return
else: # we have a filename hardcoded in the preferences
if self.working_dir is None:
self.error("Working directory not set yet")
return
filename = self.working_dir + os.sep + filename
if os.path.exists(filename) is False:
self.error("%s page does not exist. Check the preferences dialog." % filename)
return
else:
self.info("Reading and openning %s" % filename)
url = "file://" + filename
# The browser executable itself
self.browser = Browser(url)
self.browser.show()
def create_form_dict(self, layout):
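        # Builds a (possibly nested) dict from the form layout: option widgets
        # map their name to a cleaned value, while container widgets recurse
        # into their own layout (see the comprehension below).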
def _cleaner(value):
# This is to save the YAML file correctly since the widgets tend to
# convert None and empty strings as '""' or "''"
if value in ["None", None, "", '""', "''"]:
return None
else:
# this tries to convert to a list #issue #515
try:
return eval(value)
except:
return value
widgets = (layout.itemAt(i).widget() for i in range(layout.count()))
form_dict = {
w.get_name(): _cleaner(w.get_value()) if w.is_option() else self.create_form_dict(w.get_layout())
for w in widgets
}
return form_dict
def clear_form(self):
self.clear_layout(self.form)
def eventFilter(self, source, event):
"""Inactivate wheel event of combobox"""
if event.type() == QtCore.QEvent.Wheel and source is self.ui.choice_button:
return True
return False
# ---------------------------------------------------
# settings and close
# ---------------------------------------------------
def read_settings(self):
self.info("Reading settings")
settings = QtCore.QSettings("sequana_gui", "mainapp")
if settings.value("tab_position") is not None:
index = settings.value("tab_position")
self.ui.tabs_pipeline.setCurrentIndex(int(index))
if settings.value("tab_generic_position") is not None:
index = settings.value("tab_generic_position")
self.ui.tabs_generic.setCurrentIndex(int(index))
if settings.value("tab_sequana_position") is not None:
index = settings.value("tab_sequana_position")
self.ui.tabs_sequana.setCurrentIndex(int(index))
if settings.value("tab_sequana_input_position") is not None:
index = settings.value("tab_sequana_input_position")
self.ui.tabWidget.setCurrentIndex(int(index))
def write_settings(self):
settings = QtCore.QSettings("sequana_gui", "mainapp")
# tab snakemake output/logger/ipython
index = self.ui.tabs_pipeline.currentIndex()
settings.setValue("tab_position", index)
index = self.ui.tabs_generic.currentIndex()
settings.setValue("tab_generic_position", index)
index = self.ui.tabs_sequana.currentIndex()
settings.setValue("tab_sequana_position", index)
index = self.ui.tabWidget.currentIndex()
settings.setValue("tab_sequana_input_position", index)
def _close(self):
self.write_settings()
# end any process running that may be running
self.click_stop()
self._tempdir.remove()
try:
self.browser.close()
except:
pass
def closeEvent(self, event):
# Close button (red X)
self._close()
def close(self):
# Menu or ctrl+q
self._close()
super().close()
class Options(argparse.ArgumentParser):
def __init__(self, prog="sequana_gui"):
usage = """Sequanix (part of Sequana project) is a GUI for running Snakefiles
For Sequana projects, you can pre-fill sections as follows:
sequanix -p quality_control -w analysis -i .
to pre-fill the quality_control pipeline, use the local directory to
search for input files (fastq.gz) and run the analysis in the working
directory "analysis"
For Generic snakefiles:
sequanix -s SNAKEFILE -c CONFIGFILE -w analysis
will run the snakefile (with its config file) in the given working directory.
"""
description = """"""
super(Options, self).__init__(
usage=usage, prog=prog, description=description, formatter_class=easydev.SmartFormatter
)
group = self.add_argument_group("GENERAL")
group.add_argument("-w", "--working-directory", dest="wkdir", help="Set working directory", default=None)
group.add_argument("-n", "--no-splash", dest="nosplash", action="store_true", help="No splash screen")
group = self.add_argument_group("SEQUANA")
group.add_argument("-p", "--pipeline", dest="pipeline", default=None, help="A valid sequana pipeline name")
group_mut = group.add_mutually_exclusive_group()
group_mut.add_argument(
"-i",
"--input-directory",
dest="input_directory",
default=None,
help="input directory where to find the input data",
)
group_mut.add_argument("-f", "--input-files", dest="input_files", default=None, nargs="*", help="input files")
group.add_argument(
"-C",
"--replace-configfile",
dest="sequana_configfile",
default=None,
help="Replace default sequana config file with local configfile",
)
group = self.add_argument_group("GENERIC PIPELINES")
group.add_argument("-s", "--snakefile", dest="snakefile", default=None, help="A valid Snakefile")
group.add_argument(
"-c",
"--configfile",
dest="configfile",
default=None,
help="optional config file to be used by the Snakefile",
)
group.add_argument(
"-y", "--schema", dest="schemafile", default=None, help="optional schema file to check the config file"
)
def main(args=None): # pragma: no cover
if args is None:
args = sys.argv[:]
user_options = Options()
options = user_options.parse_args(args[1:])
signal.signal(signal.SIGINT, sigint_handler)
# QtCore.QCoreApplication.setAttribute(QtCore.Qt.AA_ShareOpenGLContexts)
app = QW.QApplication(sys.argv)
filename = pkg_resources.resource_filename("sequanix", "media/drawing.png")
if options.nosplash:
app.processEvents()
sequanix_gui = SequanixGUI(user_options=options)
sequanix_gui.show()
else:
# Show the splash screen for a few seconds
splash_pix = QtGui.QPixmap(filename)
splash = QW.QSplashScreen(splash_pix, Qt.WindowStaysOnTopHint)
splash.setMask(splash_pix.mask())
splash.show()
for i in range(0, 100):
t = time.time()
while time.time() < t + 0.5 / 100.0:
app.processEvents()
app.processEvents()
sequanix_gui = SequanixGUI(user_options=options)
sequanix_gui.show()
splash.finish(sequanix_gui)
# Make sure the main window is the active one
sequanix_gui.raise_()
sequanix_gui.activateWindow()
sys.exit(app.exec_())
if __name__ == "__main__":
main()
|
py
|
1a5b01763e3364146f9858cf90ad0de365868a88
|
from __future__ import print_function
import numpy as np
import nlcpy as vp
import numba
from math import *
import time
# target libraries
nb = 'numba'
vp_naive = 'nlcpy_naive'
vp_sca = 'nlcpy_sca'
@numba.stencil
def numba_kernel_1(din):
return (din[0, 0, -1] +
din[0, 0, 0] +
din[0, 0, 1] +
din[0, -1, 0] +
din[0, 1, 0]
)
@numba.stencil
def numba_kernel_2(din):
return (din[0, 0, -2] +
din[0, 0, -1] +
din[0, 0, 0] +
din[0, 0, 1] +
din[0, 0, 2] +
din[0, -2, 0] +
din[0, -1, 0] +
din[0, 2, 0] +
din[0, 1, 0]
)
@numba.stencil
def numba_kernel_3(din):
return (din[0, 0, -3] +
din[0, 0, -2] +
din[0, 0, -1] +
din[0, 0, 0] +
din[0, 0, 1] +
din[0, 0, 2] +
din[0, 0, 3] +
din[0, -3, 0] +
din[0, -2, 0] +
din[0, -1, 0] +
din[0, 3, 0] +
din[0, 2, 0] +
din[0, 1, 0]
)
@numba.stencil
def numba_kernel_4(din):
return (din[0, 0, -4] +
din[0, 0, -3] +
din[0, 0, -2] +
din[0, 0, -1] +
din[0, 0, 0] +
din[0, 0, 1] +
din[0, 0, 2] +
din[0, 0, 3] +
din[0, 0, 4] +
din[0, -4, 0] +
din[0, -3, 0] +
din[0, -2, 0] +
din[0, -1, 0] +
din[0, 4, 0] +
din[0, 3, 0] +
din[0, 2, 0] +
din[0, 1, 0]
)
@numba.njit
def numba_launcher(din, dout, N, I=1):
for _ in range(I):
if N == 1:
numba_kernel_1(din, out=dout)
elif N == 2:
numba_kernel_2(din, out=dout)
elif N == 3:
numba_kernel_3(din, out=dout)
elif N == 4:
numba_kernel_4(din, out=dout)
def numba_impl(din, dout, N, I=1):
# warmup
numba_launcher(din, dout, N, I=1)
s = time.time()
numba_launcher(din, dout, N, I=I)
e = time.time()
return e - s
def nlcpy_naive_impl(din, dout, N, I=1):
loc_x = [i for i in range(-N, N+1)]
loc_y = [i for i in range(-N, N+1)]
vp.request.flush()
s = time.time()
for _ in range(I):
dout_v = dout[:, N:-N, N:-N]
dout_v[...] = 0
for lx in loc_x:
for ly in loc_y:
if lx != 0 and ly != 0:
continue
dout_v += din[:, N+ly:din.shape[-2]-N+ly, N+lx:din.shape[-1]-N+lx]
vp.request.flush()
e = time.time()
return e - s
def nlcpy_sca_impl(din, dout, N, I=1):
loc_x = [i for i in range(-N, N+1)]
loc_y = [i for i in range(-N, N+1)]
sin, sout = vp.sca.create_descriptor((din, dout))
d = vp.sca.empty_description()
for lx in loc_x:
for ly in loc_y:
if lx != 0 and ly != 0:
continue
d += sin[0, ly, lx]
kern = vp.sca.create_kernel(d, sout[0, 0, 0])
vp.request.flush()
s = time.time()
for _ in range(I):
kern.execute()
vp.request.flush()
e = time.time()
return e - s
def stencil_xya(din, dout, N, I=1, xp=np, lib=nb):
if lib is nb:
rt = numba_impl(din, dout, N, I)
if lib is vp_naive:
rt = nlcpy_naive_impl(din, dout, N, I)
if lib is vp_sca:
rt = nlcpy_sca_impl(din, dout, N, I)
return rt
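# Minimal benchmarking sketch (an assumption-laden demo, not part of the module):
# it exercises only the numba backend on small synthetic 3-D float64 arrays; the
# nlcpy backends additionally require an SX-Aurora vector engine to be available.
if __name__ == '__main__':
    shape = (2, 64, 64)               # (z, y, x); sizes chosen arbitrarily
    din = np.random.rand(*shape)      # synthetic input field
    dout = np.zeros(shape)            # output buffer written by the stencil
    for N in (1, 2, 3, 4):
        rt = stencil_xya(din, dout, N, I=10, lib=nb)
        print('N={}: numba runtime {:.6f} s'.format(N, rt))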
|
py
|
1a5b018f7f72b4a2aef0cdd05845e296b479d8ba
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from django.db import models
from datetime import datetime
from django.core.mail import send_mail
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
import pytz
class WishPriority(models.Model):
class Meta:
verbose_name = _('Wish Priority')
verbose_name_plural = _('Wish Priorities')
ordering = ['weight']
weight = models.PositiveSmallIntegerField(verbose_name=_('Weight'))
priotxt = models.CharField(max_length=125, verbose_name=_('Priority'))
def __str__(self):
return self.priotxt
class Wish(models.Model):
class Meta:
verbose_name = _('Wish')
verbose_name_plural = _('Wishes')
ordering = ['priority']
    _subject = _('%(couple)s wish will be fulfilled: %(wish)s')
_text_pattern = _('Sender: %(sender)s\nDate: %(date)s\n\nWish: \n%(wish)s')
priority = models.ForeignKey(WishPriority, verbose_name=_('Priority'))
wishcover = models.ImageField(upload_to='wishes')
wishtxt = models.TextField(verbose_name=_('The wish'))
wishisbn = models.CharField(max_length=120)
visible = models.BooleanField(default=False, verbose_name=_('Visible'))
dtticrt = models.DateTimeField(auto_now_add=True)
dttichg = models.DateTimeField(auto_now=True)
# save the timestamp when somebody says: i ordered it!
dttiord = models.DateTimeField(default=None, null=True, verbose_name=_('Date of order'))
def ordered(self, name, email):
        # use localize() so pytz applies the correct UTC offset for the zone
        self.dttiord = pytz.timezone(settings.TIME_ZONE).localize(datetime.now()) if settings.USE_TZ else datetime.now()
# Send mail with detailed information
if settings.WISH_ORDER_MAIL is not None:
text = self._text_pattern % {
'sender': name,
'date' : str(self.dttiord),
'wish' : self.wishtxt
}
subjectText = self._subject % {
'couple': '+'.join(settings.COUPLE_NAMES),
'wish': self.wishtxt[:20]
}
send_mail(subjectText, text, email, [settings.WISH_ORDER_MAIL])
|
py
|
1a5b050cf3f037be04e527b8523c565bd021cc91
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test various command line arguments and configuration file parameters."""
import os
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import get_datadir_path
class ConfArgsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
def run_test(self):
self.stop_node(0)
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
default_data_dir = get_datadir_path(self.options.tmpdir, 0)
new_data_dir = os.path.join(default_data_dir, 'newdatadir')
new_data_dir_2 = os.path.join(default_data_dir, 'newdatadir2')
# Check that using -datadir argument on non-existent directory fails
self.nodes[0].datadir = new_data_dir
self.assert_start_raises_init_error(0, ['-datadir='+new_data_dir], 'Error: Specified data directory "' + new_data_dir + '" does not exist.')
# Check that using non-existent datadir in conf file fails
conf_file = os.path.join(default_data_dir, "biblepay.conf")
# datadir needs to be set before [regtest] section
conf_file_contents = open(conf_file, encoding='utf8').read()
with open(conf_file, 'w', encoding='utf8') as f:
f.write("datadir=" + new_data_dir + "\n")
f.write(conf_file_contents)
self.assert_start_raises_init_error(0, ['-conf='+conf_file], 'Error reading configuration file: specified data directory "' + new_data_dir + '" does not exist.')
# Create the directory and ensure the config file now works
os.mkdir(new_data_dir)
self.start_node(0, ['-conf='+conf_file, '-wallet=w1'])
self.stop_node(0)
assert os.path.exists(os.path.join(new_data_dir, 'regtest', 'wallets', 'w1'))
# Ensure command line argument overrides datadir in conf
os.mkdir(new_data_dir_2)
self.nodes[0].datadir = new_data_dir_2
self.start_node(0, ['-datadir='+new_data_dir_2, '-conf='+conf_file, '-wallet=w2'])
assert os.path.exists(os.path.join(new_data_dir_2, 'regtest', 'wallets', 'w2'))
if __name__ == '__main__':
ConfArgsTest().main()
|
py
|
1a5b05305e6a86c997a63a5fc1a2b61c723cd323
|
from socket import *
serverName = '127.0.0.1'
serverPort = 12000
clientSocket = socket(AF_INET, SOCK_DGRAM)
message = input("Input lowercase sentence: ")  # input() for Python 3
clientSocket.sendto(message.encode(), (serverName, serverPort))
modifiedMessage, serverAddress = clientSocket.recvfrom(2048)
print(modifiedMessage.decode())
clientSocket.close()
|
py
|
1a5b06b8c8d9ac2729201b48788c9400243af24c
|
from setuptools import setup
# Ensure we're in the proper directory whether or not we're being used by pip.
#os.chdir(os.path.dirname(os.path.abspath(__file__)))
version='0.1'
with open('README.md', 'r') as f:
readme = f.read()
with open('LICENSE', 'r') as f:
license = f.read()
INSTALL_REQUIRES = (
'protobuf>=3.9.0',
'grpcio>=1.22.0',
'grpcio-tools>=1.22.0',
'googleapis-common-protos>=1.6.0',
)
setup(
name='alameda-api',
version=version,
description='Alameda API interfaces',
long_description=readme,
long_description_content_type="text/markdown",
author='ProphetStor Inc.',
author_email='[email protected]',
    url='https://github.com/containers-ai/api',
license=license,
packages=['alameda_api.v1alpha1.ai_service',
'alameda_api.v1alpha1.operator',
'alameda_api.v1alpha1.datahub',
'alameda_api.v1alpha1.datahub.common',
'alameda_api.v1alpha1.datahub.events',
'alameda_api.v1alpha1.datahub.gpu',
'alameda_api.v1alpha1.datahub.licenses',
'alameda_api.v1alpha1.datahub.metrics',
'alameda_api.v1alpha1.datahub.plannings',
'alameda_api.v1alpha1.datahub.predictions',
'alameda_api.v1alpha1.datahub.rawdata',
'alameda_api.v1alpha1.datahub.recommendations',
'alameda_api.v1alpha1.datahub.resources',
'alameda_api.v1alpha1.datahub.scores',
'alameda_api.v1alpha1.datahub.weavescope',
'alameda_api.v1alpha1.datahub.schemas',
'alameda_api.v1alpha1.datahub.applications',
'common'],
package_dir={
'alameda_api.v1alpha1.ai_service': 'alameda_api/v1alpha1/ai_service',
'alameda_api.v1alpha1.operator': 'alameda_api/v1alpha1/operator',
'alameda_api.v1alpha1.datahub': 'alameda_api/v1alpha1/datahub',
'alameda_api.v1alpha1.datahub.common': 'alameda_api/v1alpha1/datahub/common',
'alameda_api.v1alpha1.datahub.events': 'alameda_api/v1alpha1/datahub/events',
'alameda_api.v1alpha1.datahub.gpu': 'alameda_api/v1alpha1/datahub/gpu',
'alameda_api.v1alpha1.datahub.licenses': 'alameda_api/v1alpha1/datahub/licenses',
'alameda_api.v1alpha1.datahub.metrics': 'alameda_api/v1alpha1/datahub/metrics',
'alameda_api.v1alpha1.datahub.plannings': 'alameda_api/v1alpha1/datahub/plannings',
'alameda_api.v1alpha1.datahub.predictions': 'alameda_api/v1alpha1/datahub/predictions',
'alameda_api.v1alpha1.datahub.rawdata': 'alameda_api/v1alpha1/datahub/rawdata',
'alameda_api.v1alpha1.datahub.recommendations': 'alameda_api/v1alpha1/datahub/recommendations',
'alameda_api.v1alpha1.datahub.resources': 'alameda_api/v1alpha1/datahub/resources',
'alameda_api.v1alpha1.datahub.scores': 'alameda_api/v1alpha1/datahub/scores',
'alameda_api.v1alpha1.datahub.weavescope': 'alameda_api/v1alpha1/datahub/weavescope',
'alameda_api.v1alpha1.datahub.schemas': 'alameda_api/v1alpha1/datahub/schemas',
'alameda_api.v1alpha1.datahub.applications': 'alameda_api/v1alpha1/datahub/applications',
'common': 'common',
},
install_requires=INSTALL_REQUIRES,
zip_safe=False
)
|
py
|
1a5b07744c3cfe2524c5ad0868c6cafd24cf92f0
|
# This config does not work with the version of DD4hep that uses Geant4 units. This config performs a comparison
# with a reference geometry which might use the ROOT units convention. This mismatch somehow triggers a ROOT exception.
# We don't currently have a fix for this problem.
import FWCore.ParameterSet.Config as cms
process = cms.Process('VALID')
process.source = cms.Source('EmptySource')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(1)
)
process.load('Configuration.StandardSequences.DD4hep_GeometrySim_cff')
process.load("FWCore.MessageLogger.MessageLogger_cfi")
process.load("Geometry.MuonNumbering.muonNumberingInitialization_cfi")
process.load("Geometry.MuonNumbering.muonGeometryConstants_cff")
process.MessageLogger = cms.Service("MessageLogger",
destinations = cms.untracked.vstring('myLog'),
myLog = cms.untracked.PSet(
threshold = cms.untracked.string('INFO'),
)
)
process.DTGeometryESProducer = cms.ESProducer("DTGeometryESProducer",
DDDetector = cms.ESInputTag('',''),
appendToDataLabel = cms.string(''),
applyAlignment = cms.bool(False),
alignmentsLabel = cms.string(''),
attribute = cms.string('MuStructure'),
value = cms.string('MuonBarrelDT'),
fromDDD = cms.bool(True)
)
process.DDCompactViewESProducer = cms.ESProducer("DDCompactViewESProducer",
appendToDataLabel = cms.string('')
)
process.DDSpecParRegistryESProducer = cms.ESProducer("DDSpecParRegistryESProducer",
appendToDataLabel = cms.string('')
)
process.muonGeometryConstants.fromDD4Hep = True
process.valid = cms.EDAnalyzer("DTGeometryValidate",
infileName = cms.untracked.string('Geometry/DTGeometryBuilder/data/cmsRecoGeom-2021.root'),
outfileName = cms.untracked.string('validateDTGeometry.root'),
tolerance = cms.untracked.int32(7)
)
process.p = cms.Path(process.valid)
|
py
|
1a5b081f0917c8acc4ff7b4dbf4acda058faaff2
|
"""empty message
Revision ID: 09b6565cf4e7
Revises: 1aae34526a4a
Create Date: 2018-02-12 12:21:05.984927
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '09b6565cf4e7'
down_revision = '1aae34526a4a'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('user', sa.Column('signup_date', sa.DateTime(), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('user', 'signup_date')
# ### end Alembic commands ###
|
py
|
1a5b082eb33ff05ae76d2d0c5506f5a52a3c099e
|
import re
class PathwayFinder:
def __init__(self, dict_path):
self.pathway2id = dict()
self.pathway_id2keggid = dict()
with open(dict_path, 'r') as f:
for l in f:
cols = l[:-1].split('\t')
if 2 > len(cols):
print('Invalid line:', l[:-1])
continue
pathway_id = int(cols[0])
name = cols[1]
if "pathway" not in name:
name += ' pathway'
if name in self.pathway2id:
# print(name, pathway_id)
continue
self.pathway2id[name] = pathway_id
if 3 <= len(cols) and '' != cols[2]:
self.pathway_id2keggid[pathway_id] = cols[2]
extended = list()
for pname in self.pathway2id:
if "pathway" not in pname:
extended.append(pname + ' pathway')
else:
extended.append(pname)
self.regex = re.compile('|'.join(extended))
print('# of pathway regex', len(extended))
def tag(self, text):
pathways = list()
for idx, m in enumerate(self.regex.finditer(text)):
pathway_id = self.pathway2id[m.group()]
m_span = m.span()
bern_pathway_id = 'BERN:{}'.format(pathway_id)
id_fin = \
self.pathway_id2keggid[pathway_id] + '\t' + bern_pathway_id \
if pathway_id in self.pathway_id2keggid else bern_pathway_id
pathways.append({
"start": m_span[0],
"id": id_fin,
"end": m_span[1]
})
return pathways
if __name__ == '__main__':
pf = PathwayFinder('../normalization/resources/dictionary/best_dict_Pathway.txt')
print(pf.tag('Transferrin receptor-involved HIF-1 signaling pathway in cervical cancer. Cervical cancer is one of the most prevalent gynecologic malignancies and has remained an intractable cancer over the past decades. We analyzed the aberrant expression patterns of cervical cancer using RNA-Seq data from The Cancer Genome Atlas (TCGA). A total of 3352 differently expressed genes (DEGs) were identified in 306 cervical cancer samples compared with 3 control samples, with 1401 genes upregulated and 1951 downregulated. Under Kaplan-Meier analysis, 76 out of these DEGs with a significantly different survival rate between patients with high and low expression groups were picked out and uploaded to perform Kyoto Encyclopedia of Genes and Genomes (KEGG) pathway enrichment, which identified a transferrin receptor (TFRC)-involved HIF-1 signaling pathway (p < 0.05). Clinical data analysis showed that high TFRC expression in cervical cancers was associated with incrementally advanced stage, tumor status, and lymph nodes (all p-values <0.05), while multivariate analysis revealed that TFRC remained an independent prognostic variable for poor overall survival. In conclusion, our data indicated that the TFRC-involved HIF-1 signaling pathway may play a crucial role in cervical cancer.'))
|
py
|
1a5b0861487df4b8cad00d0a4366fb24b9b621ef
|
"This is the locale selecting middleware that will look at accept headers"
from django.conf import settings
from django.core.urlresolvers import (
LocaleRegexURLResolver, get_resolver, get_script_prefix, is_valid_path,
)
from django.http import HttpResponseRedirect
from django.utils import translation
from django.utils.cache import patch_vary_headers
from django.utils.functional import cached_property
class LocaleMiddleware(object):
"""
This is a very simple middleware that parses a request
and decides what translation object to install in the current
thread context. This allows pages to be dynamically
translated to the language the user desires (if the language
is available, of course).
"""
response_redirect_class = HttpResponseRedirect
def process_request(self, request):
language = translation.get_language_from_request(
request, check_path=self.is_language_prefix_patterns_used)
translation.activate(language)
request.LANGUAGE_CODE = translation.get_language()
def process_response(self, request, response):
language = translation.get_language()
language_from_path = translation.get_language_from_path(request.path_info)
if (response.status_code == 404 and not language_from_path
and self.is_language_prefix_patterns_used):
urlconf = getattr(request, 'urlconf', None)
language_path = '/%s%s' % (language, request.path_info)
path_valid = is_valid_path(language_path, urlconf)
if (not path_valid and settings.APPEND_SLASH
and not language_path.endswith('/')):
path_valid = is_valid_path("%s/" % language_path, urlconf)
if path_valid:
script_prefix = get_script_prefix()
# Insert language after the script prefix and before the
# rest of the URL
language_url = request.get_full_path().replace(
script_prefix,
'%s%s/' % (script_prefix, language),
1
)
return self.response_redirect_class(language_url)
if not (self.is_language_prefix_patterns_used
and language_from_path):
patch_vary_headers(response, ('Accept-Language',))
if 'Content-Language' not in response:
response['Content-Language'] = language
return response
@cached_property
def is_language_prefix_patterns_used(self):
"""
Returns `True` if the `LocaleRegexURLResolver` is used
at root level of the urlpatterns, else it returns `False`.
"""
for url_pattern in get_resolver(None).url_patterns:
if isinstance(url_pattern, LocaleRegexURLResolver):
return True
return False
|
py
|
1a5b08cff573be228356ed27880acbc76edd980a
|
# coding=utf-8
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2018
from __future__ import print_function
from future.builtins import *
import sys
import sysconfig
import os
import argparse
import streamsx.rest
def _stop(sas, cmd_args):
"""Stop the service if no jobs are running unless force is set"""
if not cmd_args.force:
status = sas.get_instance_status()
jobs = int(status['job_count'])
if jobs:
return status
return sas.stop_instance()
def run_cmd(args=None):
cmd_args = _parse_args(args)
sc = streamsx.rest.StreamingAnalyticsConnection(service_name=cmd_args.service_name)
sas = sc.get_streaming_analytics()
if cmd_args.subcmd == 'start':
result = sc.get_streaming_analytics().start_instance()
elif cmd_args.subcmd == 'stop':
result = _stop(sas, cmd_args)
elif cmd_args.subcmd == 'status':
result = sc.get_streaming_analytics().get_instance_status()
if not cmd_args.full_response:
return {k: result[k] for k in ('state', 'status', 'job_count')}
return result
def main(args=None):
""" Performs an action against a Streaming Analytics service.
"""
try:
sr = run_cmd(args)
sr['return_code'] = 0
except:
sr = {'return_code':1, 'error': sys.exc_info()}
return sr
def _parse_args(args):
""" Argument parsing
"""
cmd_parser = argparse.ArgumentParser(description='Control commands for a Streaming Analytics service.')
cmd_parser.add_argument('--service-name', help='Streaming Analytics service name')
cmd_parser.add_argument('--full-response', action='store_true', help='Print the full JSON response.')
subparsers = cmd_parser.add_subparsers(help='Supported commands', dest='subcmd')
parser_start = subparsers.add_parser('start', help='Start the service instance')
parser_status = subparsers.add_parser('status', help='Get the service status.')
parser_stop = subparsers.add_parser('stop', help='Stop the instance for the service.')
parser_stop.add_argument('--force', action='store_true', help='Stop the service even if jobs are running.')
return cmd_parser.parse_args(args)
if __name__ == '__main__':
sr = main()
rc = sr['return_code']
del sr['return_code']
if rc == 0:
print(sr)
else:
print(sr['error'][1], file=sys.stderr)
sys.exit(rc)
|
py
|
1a5b09e7fcd0cc2590a97bfdaad51717423a19d1
|
# -*- coding: utf-8 -*-
# Third party imports
import pytest
# Local application imports
from mosqito.sq_metrics import loudness_zwtv
from mosqito.utils import load
from validations.sq_metrics.loudness_zwtv.validation_loudness_zwtv import (
_check_compliance,
)
@pytest.mark.loudness_zwtv # to skip or run only loudness zwicker time-varying tests
def test_loudness_zwtv():
"""Test function for the script loudness_zwicker_time
Test function for the script loudness_zwtv with
.wav file as input. The input file is provided by ISO 532-1 annex
B4 and B5, the compliance is assessed according to section 6.1 of the
standard. One .png compliance plot is generated.
Parameters
----------
None
Outputs
-------
None
"""
# Test signal as input for time-varying loudness
# (from ISO 532-1 annex B4)
signal = {
"data_file": "tests/input/Test signal 10 (tone pulse 1 kHz 10 ms 70 dB).wav",
"xls": "tests/input/Results and tests for synthetic signals (time varying loudness).xlsx",
"tab": "Test signal 10",
"N_specif_bark": 8.5,
"field": "free",
}
# Load signal and compute third octave band spectrum
sig, fs = load(signal["data_file"], wav_calib=2 * 2 ** 0.5)
# Compute Loudness
N, N_spec, bark_axis, time_axis = loudness_zwtv(sig, fs, signal["field"])
loudness = {
"name": "Loudness",
"values": N,
"specific values": N_spec,
"freqs": bark_axis,
}
# Check axis dimensions
assert len(N) == len(time_axis)
assert N_spec.shape[1] == len(time_axis)
assert N_spec.shape[0] == len(bark_axis)
# Check ISO 532-1 compliance
assert _check_compliance(loudness, signal, "./tests/output/")
# test the function
if __name__ == "__main__":
test_loudness_zwtv()
|
py
|
1a5b0b06c2e0af2ed63f08056371d3a2df2500fd
|
"""
Django settings for proyectos project.
Generated by 'django-admin startproject' using Django 2.2.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '%63(2+2l*-ig6p@uzwejrrl4z*0*@r33@=v40!2m10g0jqrjyp'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'general',
'bootstrap4',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'proyectos.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'proyectos.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
|
py
|
1a5b0b17e249654582919b7a818e54a21e69b0c9
|
"""
sentry.buffer.redis
~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import six
from time import time
from binascii import crc32
from datetime import datetime
from django.db import models
from django.utils import timezone
from django.utils.encoding import force_bytes
from sentry.buffer import Buffer
from sentry.exceptions import InvalidConfiguration
from sentry.tasks.process_buffer import process_incr, process_pending
from sentry.utils import json, metrics
from sentry.utils.compat import pickle
from sentry.utils.hashlib import md5_text
from sentry.utils.imports import import_string
from sentry.utils.redis import get_cluster_from_options
class PendingBuffer(object):
def __init__(self, size):
assert size > 0
self.buffer = [None] * size
self.size = size
self.pointer = 0
def full(self):
return self.pointer == self.size
def empty(self):
return self.pointer == 0
def append(self, item):
assert not self.full()
self.buffer[self.pointer] = item
self.pointer += 1
def clear(self):
self.pointer = 0
def flush(self):
rv = self.buffer[:self.pointer]
self.clear()
return rv
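# Illustrative usage of PendingBuffer (values are placeholders):
#   buf = PendingBuffer(2)
#   buf.append('key-a'); buf.append('key-b')   # buf.full() is now True
#   buf.flush()  # -> ['key-a', 'key-b'] and resets the internal pointer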
class RedisBuffer(Buffer):
key_expire = 60 * 60 # 1 hour
pending_key = 'b:p'
def __init__(self, pending_partitions=1, incr_batch_size=2, **options):
self.cluster, options = get_cluster_from_options('SENTRY_BUFFER_OPTIONS', options)
self.pending_partitions = pending_partitions
self.incr_batch_size = incr_batch_size
assert self.pending_partitions > 0
assert self.incr_batch_size > 0
def validate(self):
try:
with self.cluster.all() as client:
client.ping()
except Exception as e:
raise InvalidConfiguration(six.text_type(e))
def _coerce_val(self, value):
if isinstance(value, models.Model):
value = value.pk
return force_bytes(value, errors='replace')
def _make_key(self, model, filters):
"""
Returns a Redis-compatible key for the model given filters.
"""
        return 'b:k:%s:%s' % (
            model._meta,
            md5_text(
                '&'.join(
                    '%s=%s' % (k, self._coerce_val(v))
                    for k, v in sorted(six.iteritems(filters))
                )
            ).hexdigest(),
        )
def _make_pending_key(self, partition=None):
"""
Returns the key to be used for the pending buffer.
When partitioning is enabled, there is a key for each
partition, without it, there's only the default pending_key
"""
if partition is None:
return self.pending_key
assert partition >= 0
return '%s:%d' % (self.pending_key, partition)
def _make_pending_key_from_key(self, key):
"""
Return the pending_key for a given key. This is used
to route a key into the correct pending buffer. If partitioning
is disabled, route into the no partition buffer.
"""
if self.pending_partitions == 1:
return self.pending_key
return self._make_pending_key(crc32(key) % self.pending_partitions)
def _make_lock_key(self, key):
return 'l:%s' % (key, )
def _dump_values(self, values):
result = {}
for k, v in six.iteritems(values):
result[k] = self._dump_value(v)
return result
def _dump_value(self, value):
if isinstance(value, six.string_types):
type_ = 's'
elif isinstance(value, datetime):
type_ = 'd'
value = value.strftime('%s.%f')
elif isinstance(value, int):
type_ = 'i'
elif isinstance(value, float):
type_ = 'f'
else:
raise TypeError(type(value))
return (type_, six.text_type(value))
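    # Illustrative examples of the (type tag, text) encoding produced above:
    #   _dump_value('abc')   -> ('s', 'abc')
    #   _dump_value(3)       -> ('i', '3')
    #   _dump_value(1.5)     -> ('f', '1.5')
    #   _dump_value(dt)      -> ('d', '<epoch seconds>.<microseconds>')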
def _load_values(self, payload):
result = {}
for k, (t, v) in six.iteritems(payload):
result[k] = self._load_value((t, v))
return result
def _load_value(self, payload):
(type_, value) = payload
if type_ == 's':
return value
elif type_ == 'd':
return datetime.fromtimestamp(float(value)).replace(
tzinfo=timezone.utc
)
elif type_ == 'i':
return int(value)
elif type_ == 'f':
return float(value)
else:
raise TypeError('invalid type: {}'.format(type_))
def incr(self, model, columns, filters, extra=None):
"""
Increment the key by doing the following:
- Insert/update a hashmap based on (model, columns)
- Perform an incrby on counters
- Perform a set (last write wins) on extra
- Add hashmap key to pending flushes
"""
# TODO(dcramer): longer term we'd rather not have to serialize values
# here (unless it's to JSON)
key = self._make_key(model, filters)
pending_key = self._make_pending_key_from_key(key)
# We can't use conn.map() due to wanting to support multiple pending
# keys (one per Redis partition)
conn = self.cluster.get_local_client_for_key(key)
pipe = conn.pipeline()
pipe.hsetnx(key, 'm', '%s.%s' % (model.__module__, model.__name__))
# TODO(dcramer): once this goes live in production, we can kill the pickle path
# (this is to ensure a zero downtime deploy where we can transition event processing)
pipe.hsetnx(key, 'f', pickle.dumps(filters))
# pipe.hsetnx(key, 'f', json.dumps(self._dump_values(filters)))
for column, amount in six.iteritems(columns):
pipe.hincrby(key, 'i+' + column, amount)
if extra:
# Group tries to serialize 'score', so we'd need some kind of processing
# hook here
# e.g. "update score if last_seen or times_seen is changed"
for column, value in six.iteritems(extra):
# TODO(dcramer): once this goes live in production, we can kill the pickle path
# (this is to ensure a zero downtime deploy where we can transition event processing)
pipe.hset(key, 'e+' + column, pickle.dumps(value))
# pipe.hset(key, 'e+' + column, json.dumps(self._dump_value(value)))
pipe.expire(key, self.key_expire)
pipe.zadd(pending_key, time(), key)
pipe.execute()
metrics.incr('buffer.incr', skip_internal=True, tags={
'module': model.__module__,
'model': model.__name__,
})
def process_pending(self, partition=None):
if partition is None and self.pending_partitions > 1:
# If we're using partitions, this one task fans out into
# N subtasks instead.
for i in range(self.pending_partitions):
process_pending.apply_async(kwargs={'partition': i})
# Explicitly also run over the unpartitioned buffer as well
# to ease in transition. In practice, this should just be
# super fast and is fine to do redundantly.
pending_key = self._make_pending_key(partition)
client = self.cluster.get_routing_client()
lock_key = self._make_lock_key(pending_key)
# prevent a stampede due to celerybeat + periodic task
if not client.set(lock_key, '1', nx=True, ex=60):
return
pending_buffer = PendingBuffer(self.incr_batch_size)
try:
keycount = 0
with self.cluster.all() as conn:
results = conn.zrange(pending_key, 0, -1)
with self.cluster.all() as conn:
for host_id, keys in six.iteritems(results.value):
if not keys:
continue
keycount += len(keys)
for key in keys:
pending_buffer.append(key)
if pending_buffer.full():
process_incr.apply_async(
kwargs={
'batch_keys': pending_buffer.flush(),
}
)
conn.target([host_id]).zrem(pending_key, *keys)
# queue up remainder of pending keys
if not pending_buffer.empty():
process_incr.apply_async(kwargs={
'batch_keys': pending_buffer.flush(),
})
metrics.timing('buffer.pending-size', keycount)
finally:
client.delete(lock_key)
def process(self, key=None, batch_keys=None):
assert not (key is None and batch_keys is None)
assert not (key is not None and batch_keys is not None)
if key is not None:
batch_keys = [key]
for key in batch_keys:
self._process_single_incr(key)
def _process_single_incr(self, key):
client = self.cluster.get_routing_client()
lock_key = self._make_lock_key(key)
# prevent a stampede due to the way we use celery etas + duplicate
# tasks
if not client.set(lock_key, '1', nx=True, ex=10):
metrics.incr('buffer.revoked', tags={'reason': 'locked'}, skip_internal=False)
self.logger.debug('buffer.revoked.locked', extra={'redis_key': key})
return
pending_key = self._make_pending_key_from_key(key)
try:
conn = self.cluster.get_local_client_for_key(key)
pipe = conn.pipeline()
pipe.hgetall(key)
pipe.zrem(pending_key, key)
pipe.delete(key)
values = pipe.execute()[0]
if not values:
metrics.incr('buffer.revoked', tags={'reason': 'empty'}, skip_internal=False)
self.logger.debug('buffer.revoked.empty', extra={'redis_key': key})
return
model = import_string(values.pop('m'))
if values['f'].startswith('{'):
filters = self._load_values(json.loads(values.pop('f')))
else:
# TODO(dcramer): legacy pickle support - remove in Sentry 9.1
filters = pickle.loads(values.pop('f'))
incr_values = {}
extra_values = {}
for k, v in six.iteritems(values):
if k.startswith('i+'):
incr_values[k[2:]] = int(v)
elif k.startswith('e+'):
if v.startswith('['):
extra_values[k[2:]] = self._load_value(json.loads(v))
else:
# TODO(dcramer): legacy pickle support - remove in Sentry 9.1
extra_values[k[2:]] = pickle.loads(v)
super(RedisBuffer, self).process(model, incr_values, filters, extra_values)
finally:
client.delete(lock_key)
|
py
|
1a5b0c6775e2497f813ddbb3738ec490c7e74a5b
|
from collections import defaultdict, deque
from functools import partial
import augustus as ag
from augustus.system.base_cleaner import CleanerBase
class SMA(CleanerBase):
def calculate(self, ticker):
key = f'{ticker}_{self.frequency}'
close = self.data[key]['close']
return sum(close)/len(close)
|
py
|
1a5b0e7d6b5612b8c063f21432d8d1478e701af3
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
_DETECTRON_OPS_LIB = 'libcaffe2_detectron_ops_gpu.so'
_CMAKE_INSTALL_PREFIX = '/usr/local'
HIGHEST_BACKBONE_LVL = 5
LOWEST_BACKBONE_LVL = 2
import argparse
import cv2
# import glob
import copy
import logging
import os
import sys
import six
import time
import importlib
import pprint
import contextlib
import re
import scipy.sparse
import collections
import numpy as np
import numpy.random as npr
import matplotlib.pyplot as plt
# sys.path.append('/root/pmt/thirdparty/densepose/np') # path to where detectron
# bash this with model missing in 'filename' and add the path to sys
# find / -type f -iname "filename*"
# sys.path.append('path/to/where/cafffe2/is')
from caffe2.python import workspace
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import dyndep
from caffe2.python import scope
from caffe2.python import cnn
from caffe2.python import muji
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
from pycocotools import mask as COCOmask
from pycocotools.coco import COCO
import pycocotools.mask as mask_util
from .assets.config import assert_and_infer_cfg
from .assets.config import cfg
from .assets.config import merge_cfg_from_file
from .assets.config import load_cfg
import cython_bbox as cython_bbox
import cython_nms as cython_nms
from collections import defaultdict
from collections import OrderedDict
from six import string_types
from six.moves import cPickle as pickle
from six.moves import urllib
from glob import glob
from scipy.io import loadmat
from matplotlib.patches import Polygon
box_utils_bbox_overlaps = cython_bbox.bbox_overlaps
bbox_overlaps = cython_bbox.bbox_overlaps
logger = logging.getLogger(__name__)
FpnLevelInfo = collections.namedtuple(
'FpnLevelInfo',
['blobs', 'dims', 'spatial_scales']
)
def _progress_bar(count, total):
"""Report download progress.
Credit:
https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console/27871113
"""
bar_len = 60
filled_len = int(round(bar_len * count / float(total)))
percents = round(100.0 * count / float(total), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
sys.stdout.write(
' [{}] {}% of {:.1f}MB file \r'.
format(bar, percents, total / 1024 / 1024)
)
sys.stdout.flush()
if count >= total:
sys.stdout.write('\n')
def download_url(
url, dst_file_path, chunk_size=8192, progress_hook=_progress_bar
):
"""Download url and write it to dst_file_path.
Credit:
https://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
"""
response = urllib.request.urlopen(url)
if six.PY2:
total_size = response.info().getheader('Content-Length').strip()
else:
total_size = response.info().get('Content-Length').strip()
total_size = int(total_size)
bytes_so_far = 0
with open(dst_file_path, 'wb') as f:
while 1:
chunk = response.read(chunk_size)
bytes_so_far += len(chunk)
if not chunk:
break
if progress_hook:
progress_hook(bytes_so_far, total_size)
f.write(chunk)
return bytes_so_far
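# Illustrative call (URL and destination path are placeholders, not real assets):
#   download_url('https://example.com/model_weights.pkl', '/tmp/model_weights.pkl')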
def get_class_string(class_index, score, dataset):
class_text = dataset.classes[class_index] if dataset is not None else \
'id{:d}'.format(class_index)
return class_text + ' {:0.2f}'.format(score).lstrip('0')
def kp_connections(keypoints):
kp_lines = [
[keypoints.index('left_eye'), keypoints.index('right_eye')],
[keypoints.index('left_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('nose')],
[keypoints.index('right_eye'), keypoints.index('right_ear')],
[keypoints.index('left_eye'), keypoints.index('left_ear')],
[keypoints.index('right_shoulder'), keypoints.index('right_elbow')],
[keypoints.index('right_elbow'), keypoints.index('right_wrist')],
[keypoints.index('left_shoulder'), keypoints.index('left_elbow')],
[keypoints.index('left_elbow'), keypoints.index('left_wrist')],
[keypoints.index('right_hip'), keypoints.index('right_knee')],
[keypoints.index('right_knee'), keypoints.index('right_ankle')],
[keypoints.index('left_hip'), keypoints.index('left_knee')],
[keypoints.index('left_knee'), keypoints.index('left_ankle')],
[keypoints.index('right_shoulder'), keypoints.index('left_shoulder')],
[keypoints.index('right_hip'), keypoints.index('left_hip')],
]
return kp_lines
def colormap(rgb=False):
color_list = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.167, 0.000, 0.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32)
color_list = color_list.reshape((-1, 3)) * 255
if not rgb:
color_list = color_list[:, ::-1]
return color_list
def keypoint_utils_get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
return keypoints, keypoint_flip_map
def convert_from_cls_format(cls_boxes, cls_segms, cls_keyps):
"""Convert from the class boxes/segms/keyps format generated by the testing
code.
"""
box_list = [b for b in cls_boxes if len(b) > 0]
if len(box_list) > 0:
boxes = np.concatenate(box_list)
else:
boxes = None
if cls_segms is not None:
segms = [s for slist in cls_segms for s in slist]
else:
segms = None
if cls_keyps is not None:
keyps = [k for klist in cls_keyps for k in klist]
else:
keyps = None
classes = []
for j in range(len(cls_boxes)):
classes += [j] * len(cls_boxes[j])
return boxes, segms, keyps, classes
def vis_utils_vis_one_image(
im, boxes, segms=None, keypoints=None, body_uv=None, thresh=0.9,
kp_thresh=2, dpi=200, box_alpha=0.0, dataset=None, show_class=False,
ext='pdf'):
"""Visual debugging of detections."""
if isinstance(boxes, list):
boxes, segms, keypoints, classes = convert_from_cls_format(
boxes, segms, keypoints)
if boxes is None or boxes.shape[0] == 0 or max(boxes[:, 4]) < thresh:
return
dataset_keypoints, _ = keypoint_utils_get_keypoints()
if segms is not None and len(segms) > 0:
masks = mask_util.decode(segms)
color_list = colormap(rgb=True) / 255
kp_lines = kp_connections(dataset_keypoints)
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(kp_lines) + 2)]
# Display in largest to smallest order to reduce occlusion
areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
sorted_inds = np.argsort(-areas)
IUV_fields = body_uv[1]
#
All_Coords = np.zeros(im.shape)
All_inds = np.zeros([im.shape[0],im.shape[1]])
K = 26
##
inds = np.argsort(boxes[:,4])
##
for i, ind in enumerate(inds):
entry = boxes[ind,:]
if entry[4] > 0.65:
entry=entry[0:4].astype(int)
####
output = IUV_fields[ind]
####
All_Coords_Old = All_Coords[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2],:]
All_Coords_Old[All_Coords_Old==0]=output.transpose([1,2,0])[All_Coords_Old==0]
All_Coords[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2],:]= All_Coords_Old
###
CurrentMask = (output[0,:,:]>0).astype(np.float32)
All_inds_old = All_inds[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2]]
All_inds_old[All_inds_old==0] = CurrentMask[All_inds_old==0]*i
All_inds[ entry[1] : entry[1]+output.shape[1],entry[0]:entry[0]+output.shape[2]] = All_inds_old
#
All_Coords[:,:,1:3] = 255. * All_Coords[:,:,1:3]
All_Coords[All_Coords>255] = 255.
All_Coords = All_Coords.astype(np.uint8)
return All_Coords
def envu_get_detectron_ops_lib():
"""Retrieve Detectron ops library."""
# Candidate prefixes for detectron ops lib path
prefixes = [_CMAKE_INSTALL_PREFIX, sys.prefix, sys.exec_prefix] + sys.path
# Candidate subdirs for detectron ops lib
subdirs = ['lib', 'torch/lib']
# Try to find detectron ops lib
for prefix in prefixes:
for subdir in subdirs:
ops_path = os.path.join(prefix, subdir, _DETECTRON_OPS_LIB)
if os.path.exists(ops_path):
#print('Found Detectron ops lib: {}'.format(ops_path))
return ops_path
raise Exception('Detectron ops lib not found')
def c2_utils_import_detectron_ops():
"""Import Detectron ops."""
detectron_ops_lib = envu_get_detectron_ops_lib()
dyndep.InitOpsLibrary(detectron_ops_lib)
def dummy_datasets_get_coco_dataset():
"""A dummy COCO dataset that includes only the 'classes' field."""
ds = AttrDict()
classes = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant',
'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse',
'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis',
'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove',
'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass',
'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich',
'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake',
'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
ds.classes = {i: name for i, name in enumerate(classes)}
return ds
def im_detect_body_uv(model, im_scale, boxes):
"""Compute body uv predictions."""
M = cfg.BODY_UV_RCNN.HEATMAP_SIZE
P = cfg.BODY_UV_RCNN.NUM_PATCHES
if boxes.shape[0] == 0:
pred_body_uvs = np.zeros((0, P, M, M), np.float32)
return pred_body_uvs
inputs = {'body_uv_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'body_uv_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.body_uv_net.Proto().name)
AnnIndex = workspace.FetchBlob(core.ScopedName('AnnIndex')).squeeze()
Index_UV = workspace.FetchBlob(core.ScopedName('Index_UV')).squeeze()
U_uv = workspace.FetchBlob(core.ScopedName('U_estimated')).squeeze()
V_uv = workspace.FetchBlob(core.ScopedName('V_estimated')).squeeze()
    # Restore the leading batch dimension in the single-box case (squeeze() drops it)
if AnnIndex.ndim == 3:
AnnIndex = np.expand_dims(AnnIndex, axis=0)
if Index_UV.ndim == 3:
Index_UV = np.expand_dims(Index_UV, axis=0)
if U_uv.ndim == 3:
U_uv = np.expand_dims(U_uv, axis=0)
if V_uv.ndim == 3:
V_uv = np.expand_dims(V_uv, axis=0)
K = cfg.BODY_UV_RCNN.NUM_PATCHES + 1
outputs = []
for ind, entry in enumerate(boxes):
# Compute ref box width and height
bx = int(max(entry[2] - entry[0], 1))
by = int(max(entry[3] - entry[1], 1))
# preds[ind] axes are CHW; bring p axes to WHC
CurAnnIndex = np.swapaxes(AnnIndex[ind], 0, 2)
CurIndex_UV = np.swapaxes(Index_UV[ind], 0, 2)
CurU_uv = np.swapaxes(U_uv[ind], 0, 2)
CurV_uv = np.swapaxes(V_uv[ind], 0, 2)
# Resize p from (HEATMAP_SIZE, HEATMAP_SIZE, c) to (int(bx), int(by), c)
CurAnnIndex = cv2.resize(CurAnnIndex, (by, bx))
CurIndex_UV = cv2.resize(CurIndex_UV, (by, bx))
CurU_uv = cv2.resize(CurU_uv, (by, bx))
CurV_uv = cv2.resize(CurV_uv, (by, bx))
# Bring Cur_Preds axes back to CHW
CurAnnIndex = np.swapaxes(CurAnnIndex, 0, 2)
CurIndex_UV = np.swapaxes(CurIndex_UV, 0, 2)
CurU_uv = np.swapaxes(CurU_uv, 0, 2)
CurV_uv = np.swapaxes(CurV_uv, 0, 2)
# Removed squeeze calls due to singleton dimension issues
CurAnnIndex = np.argmax(CurAnnIndex, axis=0)
CurIndex_UV = np.argmax(CurIndex_UV, axis=0)
CurIndex_UV = CurIndex_UV * (CurAnnIndex>0).astype(np.float32)
output = np.zeros([3, int(by), int(bx)], dtype=np.float32)
output[0] = CurIndex_UV
for part_id in range(1, K):
CurrentU = CurU_uv[part_id]
CurrentV = CurV_uv[part_id]
output[1, CurIndex_UV==part_id] = CurrentU[CurIndex_UV==part_id]
output[2, CurIndex_UV==part_id] = CurrentV[CurIndex_UV==part_id]
outputs.append(output)
num_classes = cfg.MODEL.NUM_CLASSES
cls_bodys = [[] for _ in range(num_classes)]
person_idx = keypoint_utils_get_person_class_index()
cls_bodys[person_idx] = outputs
return cls_bodys
def compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi):
"""Compute OKS for predicted keypoints wrt gt_keypoints.
src_keypoints: 4xK
src_roi: 4x1
dst_keypoints: Nx4xK
dst_roi: Nx4
"""
sigmas = np.array([
.26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87,
.87, .89, .89]) / 10.0
vars = (sigmas * 2)**2
# area
src_area = (src_roi[2] - src_roi[0] + 1) * (src_roi[3] - src_roi[1] + 1)
# measure the per-keypoint distance if keypoints visible
dx = dst_keypoints[:, 0, :] - src_keypoints[0, :]
dy = dst_keypoints[:, 1, :] - src_keypoints[1, :]
e = (dx**2 + dy**2) / vars / (src_area + np.spacing(1)) / 2
e = np.sum(np.exp(-e), axis=1) / e.shape[1]
return e
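# Illustrative sketch (not part of the original pipeline): OKS of a prediction
# against itself is 1.0, since all per-keypoint distances are zero. K=17
# matches the number of COCO keypoints assumed by the sigmas above.
def _example_oks_identity():
    K = 17
    src_keypoints = np.random.rand(4, K)
    src_roi = np.array([0., 0., 100., 100.])
    dst_keypoints = src_keypoints[np.newaxis, ...]
    dst_roi = src_roi[np.newaxis, :]
    return compute_oks(src_keypoints, src_roi, dst_keypoints, dst_roi)  # ~[1.]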
def keypoint_utils_nms_oks(kp_predictions, rois, thresh):
"""Nms based on kp predictions."""
scores = np.mean(kp_predictions[:, 2, :], axis=1)
order = scores.argsort()[::-1]
keep = []
while order.size > 0:
i = order[0]
keep.append(i)
ovr = compute_oks(
kp_predictions[i], rois[i], kp_predictions[order[1:]],
rois[order[1:]])
inds = np.where(ovr <= thresh)[0]
order = order[inds + 1]
return keep
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
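# Illustrative sketch (not part of the original pipeline): after
# scores_to_probs, every channel of the heatmap sums to 1 spatially.
def _example_scores_to_probs():
    scores = np.random.randn(3, 4, 4).astype(np.float32)
    probs = scores_to_probs(scores.copy())
    return probs.reshape(3, -1).sum(axis=1)  # ~[1., 1., 1.]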
def keypoint_utils_heatmaps_to_keypoints(maps, rois):
"""Extract predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths)
heights_ceil = np.ceil(heights)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = cfg.KRCNN.INFERENCE_MIN_SIZE
xy_preds = np.zeros(
(len(rois), 4, cfg.KRCNN.NUM_KEYPOINTS), dtype=np.float32)
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
roi_map = cv2.resize(
maps[i], (roi_map_width, roi_map_height),
interpolation=cv2.INTER_CUBIC)
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
for k in range(cfg.KRCNN.NUM_KEYPOINTS):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
xy_preds[i, 2, k] = roi_map[k, y_int, x_int]
xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int]
return xy_preds
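# Illustrative sketch (not part of the original pipeline): the discrete-to-
# continuous coordinate conversion used above, in isolation. heatmap_size=56
# is an assumed/typical value for cfg.KRCNN.HEATMAP_SIZE, and the
# INFERENCE_MIN_SIZE clamping is ignored here.
def _example_heatmap_cell_to_image_x(x_int, roi_x0, roi_width, heatmap_size=56):
    # Heckbert 1990: continuous coord = discrete coord + 0.5, then rescale to
    # the ROI width and shift by the ROI x offset.
    width_correction = roi_width / float(heatmap_size)
    return (x_int + 0.5) * width_correction + roi_x0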
def keypoint_utils_get_person_class_index():
"""Index of the person class in COCO."""
return 1
def keypoint_results(cls_boxes, pred_heatmaps, ref_boxes):
num_classes = cfg.MODEL.NUM_CLASSES
cls_keyps = [[] for _ in range(num_classes)]
person_idx = keypoint_utils_get_person_class_index()
xy_preds = keypoint_utils_heatmaps_to_keypoints(pred_heatmaps, ref_boxes)
# NMS OKS
if cfg.KRCNN.NMS_OKS:
keep = keypoint_utils_nms_oks(xy_preds, ref_boxes, 0.3)
xy_preds = xy_preds[keep, :, :]
ref_boxes = ref_boxes[keep, :]
pred_heatmaps = pred_heatmaps[keep, :, :, :]
cls_boxes[person_idx] = cls_boxes[person_idx][keep, :]
kps = [xy_preds[i] for i in range(xy_preds.shape[0])]
cls_keyps[person_idx] = kps
return cls_keyps
def im_detect_keypoints(model, im_scale, boxes):
"""Infer instance keypoint poses. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
        im_scale (float): image blob scale as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_heatmaps (ndarray): R x J x M x M array of keypoint location
logits (softmax inputs) for each of the J keypoint types output
by the network (must be processed by keypoint_results to convert
into point predictions in the original image coordinate space)
"""
M = cfg.KRCNN.HEATMAP_SIZE
if boxes.shape[0] == 0:
pred_heatmaps = np.zeros((0, cfg.KRCNN.NUM_KEYPOINTS, M, M), np.float32)
return pred_heatmaps
inputs = {'keypoint_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'keypoint_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.keypoint_net.Proto().name)
pred_heatmaps = workspace.FetchBlob(core.ScopedName('kps_score')).squeeze()
    # Restore the leading batch dimension in the single-box case (squeeze() drops it)
if pred_heatmaps.ndim == 3:
pred_heatmaps = np.expand_dims(pred_heatmaps, axis=0)
return pred_heatmaps
def combine_heatmaps_size_dep(hms_ts, ds_ts, us_ts, boxes, heur_f):
"""Combines heatmaps while taking object sizes into account."""
assert len(hms_ts) == len(ds_ts) and len(ds_ts) == len(us_ts), \
'All sets of hms must be tagged with downscaling and upscaling flags'
# Classify objects into small+medium and large based on their box areas
areas = box_utils_boxes_area(boxes)
sm_objs = areas < cfg.TEST.KPS_AUG.AREA_TH
l_objs = areas >= cfg.TEST.KPS_AUG.AREA_TH
# Combine heatmaps computed under different transformations for each object
hms_c = np.zeros_like(hms_ts[0])
for i in range(hms_c.shape[0]):
hms_to_combine = []
for hms_t, ds_t, us_t in zip(hms_ts, ds_ts, us_ts):
# Discard downscaling predictions for small and medium objects
if sm_objs[i] and ds_t:
continue
# Discard upscaling predictions for large objects
if l_objs[i] and us_t:
continue
hms_to_combine.append(hms_t[i])
hms_c[i] = heur_f(hms_to_combine)
return hms_c
def im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=False
):
"""Detects keypoints at the given width-relative aspect ratio."""
    # Perform keypoint detection on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils_aspect_ratio(boxes, aspect_ratio)
if hflip:
heatmaps_ar = im_detect_keypoints_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
heatmaps_ar = im_detect_keypoints(model, im_scale, boxes_ar)
return heatmaps_ar
def im_detect_keypoints_scale(
model, im, target_scale, target_max_size, boxes, hflip=False
):
"""Computes keypoint predictions at the given scale."""
if hflip:
heatmaps_scl = im_detect_keypoints_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
heatmaps_scl = im_detect_keypoints(model, im_scale, boxes)
return heatmaps_scl
def get_keypoints():
"""Get the COCO keypoints and their left/right flip coorespondence map."""
# Keypoints are not available in the COCO json for the test split, so we
# provide them here.
keypoints = [
'nose',
'left_eye',
'right_eye',
'left_ear',
'right_ear',
'left_shoulder',
'right_shoulder',
'left_elbow',
'right_elbow',
'left_wrist',
'right_wrist',
'left_hip',
'right_hip',
'left_knee',
'right_knee',
'left_ankle',
'right_ankle'
]
keypoint_flip_map = {
'left_eye': 'right_eye',
'left_ear': 'right_ear',
'left_shoulder': 'right_shoulder',
'left_elbow': 'right_elbow',
'left_wrist': 'right_wrist',
'left_hip': 'right_hip',
'left_knee': 'right_knee',
'left_ankle': 'right_ankle'
}
return keypoints, keypoint_flip_map
def keypoint_utils_flip_heatmaps(heatmaps):
"""Flip heatmaps horizontally."""
keypoints, flip_map = get_keypoints()
heatmaps_flipped = heatmaps.copy()
for lkp, rkp in flip_map.items():
lid = keypoints.index(lkp)
rid = keypoints.index(rkp)
heatmaps_flipped[:, rid, :, :] = heatmaps[:, lid, :, :]
heatmaps_flipped[:, lid, :, :] = heatmaps[:, rid, :, :]
heatmaps_flipped = heatmaps_flipped[:, :, :, ::-1]
return heatmaps_flipped
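# Illustrative sketch (not part of the original pipeline): flipping swaps
# left/right keypoint channels and mirrors the heatmaps along the x axis.
def _example_flip_heatmaps():
    keypoints, _ = get_keypoints()
    heatmaps = np.zeros((1, len(keypoints), 4, 4), dtype=np.float32)
    heatmaps[0, keypoints.index('left_eye'), 0, 0] = 1.0
    flipped = keypoint_utils_flip_heatmaps(heatmaps)
    # The activation moved to the 'right_eye' channel and to the mirrored column
    return flipped[0, keypoints.index('right_eye'), 0, 3]  # 1.0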
def im_detect_keypoints_hflip(model, im, target_scale, target_max_size, boxes):
"""Computes keypoint predictions on the horizontally flipped image.
Function signature is the same as for im_detect_keypoints_aug.
"""
# Compute keypoints for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils_flip_boxes(boxes, im.shape[1])
im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
heatmaps_hf = im_detect_keypoints(model, im_scale, boxes_hf)
# Invert the predicted keypoints
heatmaps_inv = keypoint_utils_flip_heatmaps(heatmaps_hf)
return heatmaps_inv
def im_detect_keypoints_aug(model, im, boxes):
"""Computes keypoint predictions with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
Returns:
heatmaps (ndarray): R x J x M x M array of keypoint location logits
"""
# Collect heatmaps predicted under different transformations
heatmaps_ts = []
# Tag predictions computed under downscaling and upscaling transformations
ds_ts = []
us_ts = []
def add_heatmaps_t(heatmaps_t, ds_t=False, us_t=False):
heatmaps_ts.append(heatmaps_t)
ds_ts.append(ds_t)
us_ts.append(us_t)
# Compute the heatmaps for the original image (identity transform)
im_scale = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
heatmaps_i = im_detect_keypoints(model, im_scale, boxes)
add_heatmaps_t(heatmaps_i)
# Perform keypoints detection on the horizontally flipped image
if cfg.TEST.KPS_AUG.H_FLIP:
heatmaps_hf = im_detect_keypoints_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_hf)
# Compute detections at different scales
for scale in cfg.TEST.KPS_AUG.SCALES:
ds_scl = scale < cfg.TEST.SCALE
us_scl = scale > cfg.TEST.SCALE
heatmaps_scl = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes
)
add_heatmaps_t(heatmaps_scl, ds_scl, us_scl)
if cfg.TEST.KPS_AUG.SCALE_H_FLIP:
heatmaps_scl_hf = im_detect_keypoints_scale(
model, im, scale, cfg.TEST.KPS_AUG.MAX_SIZE, boxes, hflip=True
)
add_heatmaps_t(heatmaps_scl_hf, ds_scl, us_scl)
# Compute keypoints at different aspect ratios
for aspect_ratio in cfg.TEST.KPS_AUG.ASPECT_RATIOS:
heatmaps_ar = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes
)
add_heatmaps_t(heatmaps_ar)
if cfg.TEST.KPS_AUG.ASPECT_RATIO_H_FLIP:
heatmaps_ar_hf = im_detect_keypoints_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
add_heatmaps_t(heatmaps_ar_hf)
# Select the heuristic function for combining the heatmaps
if cfg.TEST.KPS_AUG.HEUR == 'HM_AVG':
np_f = np.mean
elif cfg.TEST.KPS_AUG.HEUR == 'HM_MAX':
np_f = np.amax
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.KPS_AUG.HEUR)
)
def heur_f(hms_ts):
return np_f(hms_ts, axis=0)
# Combine the heatmaps
if cfg.TEST.KPS_AUG.SCALE_SIZE_DEP:
heatmaps_c = combine_heatmaps_size_dep(
heatmaps_ts, ds_ts, us_ts, boxes, heur_f
)
else:
heatmaps_c = heur_f(heatmaps_ts)
return heatmaps_c
def box_utils_expand_boxes(boxes, scale):
"""Expand an array of boxes by a given scale."""
w_half = (boxes[:, 2] - boxes[:, 0]) * .5
h_half = (boxes[:, 3] - boxes[:, 1]) * .5
x_c = (boxes[:, 2] + boxes[:, 0]) * .5
y_c = (boxes[:, 3] + boxes[:, 1]) * .5
w_half *= scale
h_half *= scale
boxes_exp = np.zeros(boxes.shape)
boxes_exp[:, 0] = x_c - w_half
boxes_exp[:, 2] = x_c + w_half
boxes_exp[:, 1] = y_c - h_half
boxes_exp[:, 3] = y_c + h_half
return boxes_exp
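# Illustrative sketch (not part of the original pipeline): expanding a box by a
# scale factor grows it symmetrically around its center.
def _example_expand_boxes():
    boxes = np.array([[10., 10., 20., 20.]])
    return box_utils_expand_boxes(boxes, 1.5)  # [[7.5, 7.5, 22.5, 22.5]]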
def segm_results(cls_boxes, masks, ref_boxes, im_h, im_w):
num_classes = cfg.MODEL.NUM_CLASSES
cls_segms = [[] for _ in range(num_classes)]
mask_ind = 0
# To work around an issue with cv2.resize (it seems to automatically pad
# with repeated border values), we manually zero-pad the masks by 1 pixel
# prior to resizing back to the original image resolution. This prevents
# "top hat" artifacts. We therefore need to expand the reference boxes by an
# appropriate factor.
M = cfg.MRCNN.RESOLUTION
scale = (M + 2.0) / M
ref_boxes = box_utils_expand_boxes(ref_boxes, scale)
ref_boxes = ref_boxes.astype(np.int32)
padded_mask = np.zeros((M + 2, M + 2), dtype=np.float32)
# skip j = 0, because it's the background class
for j in range(1, num_classes):
segms = []
for _ in range(cls_boxes[j].shape[0]):
if cfg.MRCNN.CLS_SPECIFIC_MASK:
padded_mask[1:-1, 1:-1] = masks[mask_ind, j, :, :]
else:
padded_mask[1:-1, 1:-1] = masks[mask_ind, 0, :, :]
ref_box = ref_boxes[mask_ind, :]
w = ref_box[2] - ref_box[0] + 1
h = ref_box[3] - ref_box[1] + 1
w = np.maximum(w, 1)
h = np.maximum(h, 1)
mask = cv2.resize(padded_mask, (w, h))
mask = np.array(mask > cfg.MRCNN.THRESH_BINARIZE, dtype=np.uint8)
im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
x_0 = max(ref_box[0], 0)
x_1 = min(ref_box[2] + 1, im_w)
y_0 = max(ref_box[1], 0)
y_1 = min(ref_box[3] + 1, im_h)
im_mask[y_0:y_1, x_0:x_1] = mask[
(y_0 - ref_box[1]):(y_1 - ref_box[1]),
(x_0 - ref_box[0]):(x_1 - ref_box[0])
]
# Get RLE encoding used by the COCO evaluation API
rle = mask_util.encode(
np.array(im_mask[:, :, np.newaxis], order='F')
)[0]
segms.append(rle)
mask_ind += 1
cls_segms[j] = segms
assert mask_ind == masks.shape[0]
return cls_segms
def im_detect_mask(model, im_scale, boxes):
"""Infer instance segmentation masks. This function must be called after
im_detect_bbox as it assumes that the Caffe2 workspace is already populated
with the necessary blobs.
Arguments:
model (DetectionModelHelper): the detection model to use
        im_scale (float): image blob scale as returned by im_detect_bbox
boxes (ndarray): R x 4 array of bounding box detections (e.g., as
returned by im_detect_bbox)
Returns:
pred_masks (ndarray): R x K x M x M array of class specific soft masks
output by the network (must be processed by segm_results to convert
into hard masks in the original image coordinate space)
"""
M = cfg.MRCNN.RESOLUTION
if boxes.shape[0] == 0:
pred_masks = np.zeros((0, M, M), np.float32)
return pred_masks
inputs = {'mask_rois': _get_rois_blob(boxes, im_scale)}
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois_for_test(inputs, 'mask_rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.mask_net.Proto().name)
# Fetch masks
pred_masks = workspace.FetchBlob(
core.ScopedName('mask_fcn_probs')
).squeeze()
if cfg.MRCNN.CLS_SPECIFIC_MASK:
pred_masks = pred_masks.reshape([-1, cfg.MODEL.NUM_CLASSES, M, M])
else:
pred_masks = pred_masks.reshape([-1, 1, M, M])
return pred_masks
def im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes, hflip=False):
"""Computes mask detections at the given width-relative aspect ratio."""
# Perform mask detection on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
boxes_ar = box_utils_aspect_ratio(boxes, aspect_ratio)
if hflip:
masks_ar = im_detect_mask_hflip(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes_ar
)
else:
im_scale = im_conv_body_only(
model, im_ar, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE
)
masks_ar = im_detect_mask(model, im_scale, boxes_ar)
return masks_ar
def im_detect_mask_scale(
model, im, target_scale, target_max_size, boxes, hflip=False
):
"""Computes masks at the given scale."""
if hflip:
masks_scl = im_detect_mask_hflip(
model, im, target_scale, target_max_size, boxes
)
else:
im_scale = im_conv_body_only(model, im, target_scale, target_max_size)
masks_scl = im_detect_mask(model, im_scale, boxes)
return masks_scl
def im_detect_mask_hflip(model, im, target_scale, target_max_size, boxes):
"""Performs mask detection on the horizontally flipped image.
Function signature is the same as for im_detect_mask_aug.
"""
# Compute the masks for the flipped image
im_hf = im[:, ::-1, :]
boxes_hf = box_utils_flip_boxes(boxes, im.shape[1])
im_scale = im_conv_body_only(model, im_hf, target_scale, target_max_size)
masks_hf = im_detect_mask(model, im_scale, boxes_hf)
# Invert the predicted soft masks
masks_inv = masks_hf[:, :, :, ::-1]
return masks_inv
def im_conv_body_only(model, im, target_scale, target_max_size):
"""Runs `model.conv_body_net` on the given image `im`."""
im_blob, im_scale, _im_info = blob_utils_get_image_blob(
im, target_scale, target_max_size
)
workspace.FeedBlob(core.ScopedName('data'), im_blob)
workspace.RunNet(model.conv_body_net.Proto().name)
return im_scale
def im_detect_mask_aug(model, im, boxes):
"""Performs mask detection with test-time augmentations.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): BGR image to test
boxes (ndarray): R x 4 array of bounding boxes
Returns:
masks (ndarray): R x K x M x M array of class specific soft masks
"""
assert not cfg.TEST.MASK_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
# Collect masks computed under different transformations
masks_ts = []
# Compute masks for the original image (identity transform)
im_scale_i = im_conv_body_only(model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
masks_i = im_detect_mask(model, im_scale_i, boxes)
masks_ts.append(masks_i)
# Perform mask detection on the horizontally flipped image
if cfg.TEST.MASK_AUG.H_FLIP:
masks_hf = im_detect_mask_hflip(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes
)
masks_ts.append(masks_hf)
# Compute detections at different scales
for scale in cfg.TEST.MASK_AUG.SCALES:
max_size = cfg.TEST.MASK_AUG.MAX_SIZE
masks_scl = im_detect_mask_scale(model, im, scale, max_size, boxes)
masks_ts.append(masks_scl)
if cfg.TEST.MASK_AUG.SCALE_H_FLIP:
masks_scl_hf = im_detect_mask_scale(
model, im, scale, max_size, boxes, hflip=True
)
masks_ts.append(masks_scl_hf)
# Compute masks at different aspect ratios
for aspect_ratio in cfg.TEST.MASK_AUG.ASPECT_RATIOS:
masks_ar = im_detect_mask_aspect_ratio(model, im, aspect_ratio, boxes)
masks_ts.append(masks_ar)
if cfg.TEST.MASK_AUG.ASPECT_RATIO_H_FLIP:
masks_ar_hf = im_detect_mask_aspect_ratio(
model, im, aspect_ratio, boxes, hflip=True
)
masks_ts.append(masks_ar_hf)
# Combine the predicted soft masks
if cfg.TEST.MASK_AUG.HEUR == 'SOFT_AVG':
masks_c = np.mean(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'SOFT_MAX':
masks_c = np.amax(masks_ts, axis=0)
elif cfg.TEST.MASK_AUG.HEUR == 'LOGIT_AVG':
def logit(y):
return -1.0 * np.log((1.0 - y) / np.maximum(y, 1e-20))
logit_masks = [logit(y) for y in masks_ts]
logit_masks = np.mean(logit_masks, axis=0)
masks_c = 1.0 / (1.0 + np.exp(-logit_masks))
else:
raise NotImplementedError(
'Heuristic {} not supported'.format(cfg.TEST.MASK_AUG.HEUR)
)
return masks_c
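# Illustrative sketch (not part of the original pipeline): the LOGIT_AVG
# heuristic averages soft masks in logit space, so symmetric probabilities such
# as 0.2 and 0.8 combine to exactly 0.5.
def _example_logit_avg():
    masks_ts = [np.array([[0.2]]), np.array([[0.8]])]

    def logit(y):
        return -1.0 * np.log((1.0 - y) / np.maximum(y, 1e-20))

    logit_masks = np.mean([logit(m) for m in masks_ts], axis=0)
    return 1.0 / (1.0 + np.exp(-logit_masks))  # [[0.5]]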
def box_utils_box_voting(top_dets, all_dets, thresh, scoring_method='ID', beta=1.0):
"""Apply bounding-box voting to refine `top_dets` by voting with `all_dets`.
See: https://arxiv.org/abs/1505.01749. Optional score averaging (not in the
referenced paper) can be applied by setting `scoring_method` appropriately.
"""
    # top_dets is [N, 5]; each row is [x1, y1, x2, y2, score]
    # all_dets is [M, 5]; each row is [x1, y1, x2, y2, score]
top_dets_out = top_dets.copy()
top_boxes = top_dets[:, :4]
all_boxes = all_dets[:, :4]
all_scores = all_dets[:, 4]
top_to_all_overlaps = bbox_overlaps(top_boxes, all_boxes)
for k in range(top_dets_out.shape[0]):
inds_to_vote = np.where(top_to_all_overlaps[k] >= thresh)[0]
boxes_to_vote = all_boxes[inds_to_vote, :]
ws = all_scores[inds_to_vote]
top_dets_out[k, :4] = np.average(boxes_to_vote, axis=0, weights=ws)
if scoring_method == 'ID':
# Identity, nothing to do
pass
elif scoring_method == 'TEMP_AVG':
# Average probabilities (considered as P(detected class) vs.
# P(not the detected class)) after smoothing with a temperature
# hyperparameter.
P = np.vstack((ws, 1.0 - ws))
P_max = np.max(P, axis=0)
X = np.log(P / P_max)
X_exp = np.exp(X / beta)
P_temp = X_exp / np.sum(X_exp, axis=0)
P_avg = P_temp[0].mean()
top_dets_out[k, 4] = P_avg
elif scoring_method == 'AVG':
# Combine new probs from overlapping boxes
top_dets_out[k, 4] = ws.mean()
elif scoring_method == 'IOU_AVG':
P = ws
ws = top_to_all_overlaps[k, inds_to_vote]
P_avg = np.average(P, weights=ws)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'GENERALIZED_AVG':
P_avg = np.mean(ws**beta)**(1.0 / beta)
top_dets_out[k, 4] = P_avg
elif scoring_method == 'QUASI_SUM':
top_dets_out[k, 4] = ws.sum() / float(len(ws))**beta
else:
raise NotImplementedError(
'Unknown scoring method {}'.format(scoring_method)
)
return top_dets_out
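# Illustrative sketch (not part of the original pipeline): the coordinate
# update inside box_utils_box_voting is a score-weighted average of the
# overlapping boxes. The box values and weights below are hypothetical.
def _example_box_vote_coords():
    boxes_to_vote = np.array([[0., 0., 10., 10.], [2., 2., 12., 12.]])
    ws = np.array([0.9, 0.1])
    return np.average(boxes_to_vote, axis=0, weights=ws)  # [0.2, 0.2, 10.2, 10.2]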
def box_utils_soft_nms(
dets, sigma=0.5, overlap_thresh=0.3, score_thresh=0.001, method='linear'
):
"""Apply the soft NMS algorithm from https://arxiv.org/abs/1704.04503."""
if dets.shape[0] == 0:
return dets, []
methods = {'hard': 0, 'linear': 1, 'gaussian': 2}
assert method in methods, 'Unknown soft_nms method: {}'.format(method)
dets, keep = cython_nms.soft_nms(
np.ascontiguousarray(dets, dtype=np.float32),
np.float32(sigma),
np.float32(overlap_thresh),
np.float32(score_thresh),
np.uint8(methods[method])
)
return dets, keep
def box_results_with_nms_and_limit(scores, boxes):
"""Returns bounding-box detection results by thresholding on scores and
applying non-maximum suppression (NMS).
`boxes` has shape (#detections, 4 * #classes), where each row represents
a list of predicted bounding boxes for each of the object classes in the
dataset (including the background class). The detections in each row
originate from the same object proposal.
    `scores` has shape (#detections, #classes), where each row represents a
    list of object detection confidence scores for each of the object classes
    in the dataset (including the background class). `scores[i, j]` corresponds
    to the box at `boxes[i, j * 4:(j + 1) * 4]`.
"""
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(num_classes)]
# Apply threshold on detection probabilities and apply NMS
# Skip j = 0, because it's the background class
for j in range(1, num_classes):
inds = np.where(scores[:, j] > cfg.TEST.SCORE_THRESH)[0]
scores_j = scores[inds, j]
boxes_j = boxes[inds, j * 4:(j + 1) * 4]
dets_j = np.hstack((boxes_j, scores_j[:, np.newaxis])).astype(
np.float32, copy=False
)
if cfg.TEST.SOFT_NMS.ENABLED:
nms_dets, _ = box_utils_soft_nms(
dets_j,
sigma=cfg.TEST.SOFT_NMS.SIGMA,
overlap_thresh=cfg.TEST.NMS,
score_thresh=0.0001,
method=cfg.TEST.SOFT_NMS.METHOD
)
else:
keep = box_utils_nms(dets_j, cfg.TEST.NMS)
nms_dets = dets_j[keep, :]
# Refine the post-NMS boxes using bounding-box voting
if cfg.TEST.BBOX_VOTE.ENABLED:
nms_dets = box_utils_box_voting(
nms_dets,
dets_j,
cfg.TEST.BBOX_VOTE.VOTE_TH,
scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
)
cls_boxes[j] = nms_dets
# Limit to max_per_image detections **over all classes**
if cfg.TEST.DETECTIONS_PER_IM > 0:
image_scores = np.hstack(
[cls_boxes[j][:, -1] for j in range(1, num_classes)]
)
if len(image_scores) > cfg.TEST.DETECTIONS_PER_IM:
image_thresh = np.sort(image_scores)[-cfg.TEST.DETECTIONS_PER_IM]
for j in range(1, num_classes):
keep = np.where(cls_boxes[j][:, -1] >= image_thresh)[0]
cls_boxes[j] = cls_boxes[j][keep, :]
im_results = np.vstack([cls_boxes[j] for j in range(1, num_classes)])
boxes = im_results[:, :-1]
scores = im_results[:, -1]
return scores, boxes, cls_boxes
def _add_multilevel_rois_for_test(blobs, name):
"""Distributes a set of RoIs across FPN pyramid levels by creating new level
specific RoI blobs.
Arguments:
blobs (dict): dictionary of blobs
name (str): a key in 'blobs' identifying the source RoI blob
Returns:
[by ref] blobs (dict): new keys named by `name + 'fpn' + level`
are added to dict each with a value that's an R_level x 5 ndarray of
RoIs (see _get_rois_blob for format)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_map_rois_to_fpn_levels(blobs[name][:, 1:5], lvl_min, lvl_max)
fpn_add_multilevel_roi_blobs(
blobs, name, blobs[name], lvls, lvl_min, lvl_max
)
def _project_im_rois(im_rois, scales):
"""Project image RoIs into the image pyramid built by _get_image_blob.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
scales (list): scale factors as returned by _get_image_blob
Returns:
rois (ndarray): R x 4 matrix of projected RoI coordinates
levels (ndarray): image pyramid levels used by each projected RoI
"""
rois = im_rois.astype(np.float, copy=False) * scales
levels = np.zeros((im_rois.shape[0], 1), dtype=np.int)
return rois, levels
def _get_rois_blob(im_rois, im_scale):
"""Converts RoIs into network inputs.
Arguments:
im_rois (ndarray): R x 4 matrix of RoIs in original image coordinates
im_scale_factors (list): scale factors as returned by _get_image_blob
Returns:
blob (ndarray): R x 5 matrix of RoIs in the image pyramid with columns
[level, x1, y1, x2, y2]
"""
rois, levels = _project_im_rois(im_rois, im_scale)
rois_blob = np.hstack((levels, rois))
return rois_blob.astype(np.float32, copy=False)
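# Illustrative sketch (not part of the original pipeline): the RoI blob packs a
# (currently all-zero) pyramid level index in column 0 followed by the scaled
# box coordinates.
def _example_rois_blob():
    im_rois = np.array([[10., 20., 30., 40.]])
    return _get_rois_blob(im_rois, 2.0)  # [[0., 20., 40., 60., 80.]]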
def _get_blobs(im, rois, target_scale, target_max_size):
"""Convert an image and RoIs within that image into network inputs."""
blobs = {}
blobs['data'], im_scale, blobs['im_info'] = \
blob_utils_get_image_blob(im, target_scale, target_max_size)
if rois is not None:
blobs['rois'] = _get_rois_blob(rois, im_scale)
return blobs, im_scale
def im_detect_bbox(model, im, target_scale, target_max_size, boxes=None):
"""Bounding box object detection for an image with given box proposals.
Arguments:
model (DetectionModelHelper): the detection model to use
im (ndarray): color image to test (in BGR order)
boxes (ndarray): R x 4 array of object proposals in 0-indexed
[x1, y1, x2, y2] format, or None if using RPN
Returns:
scores (ndarray): R x K array of object class scores for K classes
(K includes background as object category 0)
boxes (ndarray): R x 4*K array of predicted bounding boxes
        im_scale (float): image scale used in the input blob (as returned by
            _get_blobs and for use with im_detect_mask, etc.)
"""
inputs, im_scale = _get_blobs(im, boxes, target_scale, target_max_size)
# When mapping from image ROIs to feature map ROIs, there's some aliasing
# (some distinct image ROIs get mapped to the same feature ROI).
# Here, we identify duplicate feature ROIs, so we only compute features
# on the unique subset.
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
v = np.array([1, 1e3, 1e6, 1e9, 1e12])
hashes = np.round(inputs['rois'] * cfg.DEDUP_BOXES).dot(v)
_, index, inv_index = np.unique(
hashes, return_index=True, return_inverse=True
)
inputs['rois'] = inputs['rois'][index, :]
boxes = boxes[index, :]
# Add multi-level rois for FPN
if cfg.FPN.MULTILEVEL_ROIS and not cfg.MODEL.FASTER_RCNN:
_add_multilevel_rois_for_test(inputs, 'rois')
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v)
workspace.RunNet(model.net.Proto().name)
# Read out blobs
if cfg.MODEL.FASTER_RCNN:
rois = workspace.FetchBlob(core.ScopedName('rois'))
# unscale back to raw image space
boxes = rois[:, 1:5] / im_scale
# Softmax class probabilities
scores = workspace.FetchBlob(core.ScopedName('cls_prob')).squeeze()
# In case there is 1 proposal
scores = scores.reshape([-1, scores.shape[-1]])
if cfg.TEST.BBOX_REG:
# Apply bounding-box regression deltas
box_deltas = workspace.FetchBlob(core.ScopedName('bbox_pred')).squeeze()
# In case there is 1 proposal
box_deltas = box_deltas.reshape([-1, box_deltas.shape[-1]])
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
# Remove predictions for bg class (compat with MSRA code)
box_deltas = box_deltas[:, -4:]
pred_boxes = box_utils_bbox_transform(
boxes, box_deltas, cfg.MODEL.BBOX_REG_WEIGHTS
)
pred_boxes = box_utils_clip_tiled_boxes(pred_boxes, im.shape)
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
pred_boxes = np.tile(pred_boxes, (1, scores.shape[1]))
else:
# Simply repeat the boxes, once for each class
pred_boxes = np.tile(boxes, (1, scores.shape[1]))
if cfg.DEDUP_BOXES > 0 and not cfg.MODEL.FASTER_RCNN:
# Map scores and predictions back to the original set of boxes
scores = scores[inv_index, :]
pred_boxes = pred_boxes[inv_index, :]
return scores, pred_boxes, im_scale
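# Illustrative sketch (not part of the original pipeline): the DEDUP_BOXES
# trick above hashes quantized RoIs so that duplicates are run through the
# network only once and then scattered back with inv_index. A value of 1/16
# for DEDUP_BOXES is assumed here purely for illustration.
def _example_dedup_rois():
    rois = np.array([[0., 10., 10., 20., 20.],
                     [0., 10., 10., 20., 20.],   # exact duplicate of row 0
                     [0., 30., 30., 40., 40.]])
    v = np.array([1, 1e3, 1e6, 1e9, 1e12])
    hashes = np.round(rois * 0.0625).dot(v)
    _, index, inv_index = np.unique(hashes, return_index=True, return_inverse=True)
    # index == [0, 2]: unique rows to actually evaluate
    # inv_index == [0, 0, 1]: mapping that restores the original ordering
    return index, inv_index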
def box_utils_aspect_ratio(boxes, aspect_ratio):
"""Perform width-relative aspect ratio transformation."""
boxes_ar = boxes.copy()
boxes_ar[:, 0::4] = aspect_ratio * boxes[:, 0::4]
boxes_ar[:, 2::4] = aspect_ratio * boxes[:, 2::4]
return boxes_ar
def image_utils_aspect_ratio_rel(im, aspect_ratio):
"""Performs width-relative aspect ratio transformation."""
im_h, im_w = im.shape[:2]
im_ar_w = int(round(aspect_ratio * im_w))
im_ar = cv2.resize(im, dsize=(im_ar_w, im_h))
return im_ar
def im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals=None, hflip=False
):
"""Computes bbox detections at the given width-relative aspect ratio.
Returns predictions in the original image space.
"""
# Compute predictions on the transformed image
im_ar = image_utils_aspect_ratio_rel(im, aspect_ratio)
if not cfg.MODEL.FASTER_RCNN:
box_proposals_ar = box_utils_aspect_ratio(box_proposals, aspect_ratio)
else:
box_proposals_ar = None
if hflip:
scores_ar, boxes_ar, _ = im_detect_bbox_hflip(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals_ar
)
else:
scores_ar, boxes_ar, _ = im_detect_bbox(
model,
im_ar,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
boxes=box_proposals_ar
)
# Invert the detected boxes
boxes_inv = box_utils_aspect_ratio(boxes_ar, 1.0 / aspect_ratio)
return scores_ar, boxes_inv
def im_detect_bbox_scale(
model, im, target_scale, target_max_size, box_proposals=None, hflip=False
):
"""Computes bbox detections at the given scale.
Returns predictions in the original image space.
"""
if hflip:
scores_scl, boxes_scl, _ = im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=box_proposals
)
else:
scores_scl, boxes_scl, _ = im_detect_bbox(
model, im, target_scale, target_max_size, boxes=box_proposals
)
return scores_scl, boxes_scl
def box_utils_flip_boxes(boxes, im_width):
"""Flip boxes horizontally."""
boxes_flipped = boxes.copy()
boxes_flipped[:, 0::4] = im_width - boxes[:, 2::4] - 1
boxes_flipped[:, 2::4] = im_width - boxes[:, 0::4] - 1
return boxes_flipped
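# Illustrative sketch (not part of the original pipeline): horizontal flipping
# mirrors x coordinates around the image width (the -1 accounts for 0-indexed,
# inclusive box coordinates).
def _example_flip_boxes():
    boxes = np.array([[10., 20., 30., 40.]])
    return box_utils_flip_boxes(boxes, im_width=100)  # [[69., 20., 89., 40.]]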
def im_detect_bbox_hflip(
model, im, target_scale, target_max_size, box_proposals=None
):
"""Performs bbox detection on the horizontally flipped image.
Function signature is the same as for im_detect_bbox.
"""
# Compute predictions on the flipped image
im_hf = im[:, ::-1, :]
im_width = im.shape[1]
if not cfg.MODEL.FASTER_RCNN:
box_proposals_hf = box_utils_flip_boxes(box_proposals, im_width)
else:
box_proposals_hf = None
scores_hf, boxes_hf, im_scale = im_detect_bbox(
model, im_hf, target_scale, target_max_size, boxes=box_proposals_hf
)
# Invert the detections computed on the flipped image
boxes_inv = box_utils_flip_boxes(boxes_hf, im_width)
return scores_hf, boxes_inv, im_scale
def im_detect_bbox_aug(model, im, box_proposals=None):
"""Performs bbox detection with test-time augmentations.
Function signature is the same as for im_detect_bbox.
"""
assert not cfg.TEST.BBOX_AUG.SCALE_SIZE_DEP, \
'Size dependent scaling not implemented'
assert not cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION', \
'Coord heuristic must be union whenever score heuristic is union'
assert not cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION' or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Score heuristic must be union whenever coord heuristic is union'
assert not cfg.MODEL.FASTER_RCNN or \
cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION', \
'Union heuristic must be used to combine Faster RCNN predictions'
# Collect detections computed under different transformations
scores_ts = []
boxes_ts = []
def add_preds_t(scores_t, boxes_t):
scores_ts.append(scores_t)
boxes_ts.append(boxes_t)
# Perform detection on the horizontally flipped image
if cfg.TEST.BBOX_AUG.H_FLIP:
scores_hf, boxes_hf, _ = im_detect_bbox_hflip(
model,
im,
cfg.TEST.SCALE,
cfg.TEST.MAX_SIZE,
box_proposals=box_proposals
)
add_preds_t(scores_hf, boxes_hf)
# Compute detections at different scales
for scale in cfg.TEST.BBOX_AUG.SCALES:
max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
scores_scl, boxes_scl = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals
)
add_preds_t(scores_scl, boxes_scl)
if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
scores_scl_hf, boxes_scl_hf = im_detect_bbox_scale(
model, im, scale, max_size, box_proposals, hflip=True
)
add_preds_t(scores_scl_hf, boxes_scl_hf)
# Perform detection at different aspect ratios
for aspect_ratio in cfg.TEST.BBOX_AUG.ASPECT_RATIOS:
scores_ar, boxes_ar = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals
)
add_preds_t(scores_ar, boxes_ar)
if cfg.TEST.BBOX_AUG.ASPECT_RATIO_H_FLIP:
scores_ar_hf, boxes_ar_hf = im_detect_bbox_aspect_ratio(
model, im, aspect_ratio, box_proposals, hflip=True
)
add_preds_t(scores_ar_hf, boxes_ar_hf)
# Compute detections for the original image (identity transform) last to
# ensure that the Caffe2 workspace is populated with blobs corresponding
# to the original image on return (postcondition of im_detect_bbox)
scores_i, boxes_i, im_scale_i = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
)
add_preds_t(scores_i, boxes_i)
# Combine the predicted scores
if cfg.TEST.BBOX_AUG.SCORE_HEUR == 'ID':
scores_c = scores_i
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'AVG':
scores_c = np.mean(scores_ts, axis=0)
elif cfg.TEST.BBOX_AUG.SCORE_HEUR == 'UNION':
scores_c = np.vstack(scores_ts)
else:
raise NotImplementedError(
'Score heur {} not supported'.format(cfg.TEST.BBOX_AUG.SCORE_HEUR)
)
# Combine the predicted boxes
if cfg.TEST.BBOX_AUG.COORD_HEUR == 'ID':
boxes_c = boxes_i
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'AVG':
boxes_c = np.mean(boxes_ts, axis=0)
elif cfg.TEST.BBOX_AUG.COORD_HEUR == 'UNION':
boxes_c = np.vstack(boxes_ts)
else:
raise NotImplementedError(
'Coord heur {} not supported'.format(cfg.TEST.BBOX_AUG.COORD_HEUR)
)
return scores_c, boxes_c, im_scale_i
def im_list_to_blob(ims):
"""Convert a list of images into a network input. Assumes images were
prepared using prep_im_for_blob or equivalent: i.e.
- BGR channel order
- pixel means subtracted
- resized to the desired input size
- float32 numpy ndarray format
    Output is a 4D NCHW tensor of the images concatenated along axis 0, with
    shape (batch size, 3, max height, max width).
"""
if not isinstance(ims, list):
ims = [ims]
max_shape = np.array([im.shape for im in ims]).max(axis=0)
    # Pad the images so their spatial dimensions are divisible by the FPN coarsest stride
if cfg.FPN.FPN_ON:
stride = float(cfg.FPN.COARSEST_STRIDE)
max_shape[0] = int(np.ceil(max_shape[0] / stride) * stride)
max_shape[1] = int(np.ceil(max_shape[1] / stride) * stride)
num_images = len(ims)
blob = np.zeros(
(num_images, max_shape[0], max_shape[1], 3), dtype=np.float32
)
for i in range(num_images):
im = ims[i]
blob[i, 0:im.shape[0], 0:im.shape[1], :] = im
# Move channels (axis 3) to axis 1
# Axis order will become: (batch elem, channel, height, width)
channel_swap = (0, 3, 1, 2)
blob = blob.transpose(channel_swap)
return blob
def prep_im_for_blob(im, pixel_means, target_size, max_size):
"""Prepare an image for use as a network input blob. Specially:
- Subtract per-channel pixel mean
- Convert to float32
- Rescale to each of the specified target size (capped at max_size)
Returns a list of transformed images, one for each target size. Also returns
the scale factors that were used to compute each returned image.
"""
im = im.astype(np.float32, copy=False)
im -= pixel_means
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(target_size) / float(im_size_min)
# Prevent the biggest axis from being more than max_size
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(
im,
None,
None,
fx=im_scale,
fy=im_scale,
interpolation=cv2.INTER_LINEAR
)
return im, im_scale
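# Illustrative sketch (not part of the original pipeline): the scale selection
# used by prep_im_for_blob, in isolation. The 800/1333 defaults are assumed
# typical values for cfg.TEST.SCALE / cfg.TEST.MAX_SIZE, not read from the
# config here.
def _example_im_scale(h=480, w=640, target_size=800, max_size=1333):
    im_size_min, im_size_max = min(h, w), max(h, w)
    im_scale = float(target_size) / float(im_size_min)
    # Cap the scale so that the longer side does not exceed max_size
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    return im_scale  # ~1.667 for the defaults above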
def blob_utils_get_image_blob(im, target_scale, target_max_size):
"""Convert an image into a network input.
Arguments:
im (ndarray): a color image in BGR order
Returns:
blob (ndarray): a data blob holding an image pyramid
im_scale (float): image scale (target size) / (original size)
im_info (ndarray)
"""
processed_im, im_scale = prep_im_for_blob(
im, cfg.PIXEL_MEANS, target_scale, target_max_size
)
blob = im_list_to_blob(processed_im)
# NOTE: this height and width may be larger than actual scaled input image
# due to the FPN.COARSEST_STRIDE related padding in im_list_to_blob. We are
# maintaining this behavior for now to make existing results exactly
# reproducible (in practice using the true input image height and width
# yields nearly the same results, but they are sometimes slightly different
# because predictions near the edge of the image will be pruned more
# aggressively).
height, width = blob.shape[2], blob.shape[3]
im_info = np.hstack((height, width, im_scale))[np.newaxis, :]
return blob, im_scale, im_info.astype(np.float32)
def _scale_enum(anchor, scales):
"""Enumerate a set of anchors for each scale wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
ws = w * scales
hs = h * scales
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _mkanchors(ws, hs, x_ctr, y_ctr):
"""Given a vector of widths (ws) and heights (hs) around a center
(x_ctr, y_ctr), output a set of anchors (windows).
"""
ws = ws[:, np.newaxis]
hs = hs[:, np.newaxis]
anchors = np.hstack(
(
x_ctr - 0.5 * (ws - 1),
y_ctr - 0.5 * (hs - 1),
x_ctr + 0.5 * (ws - 1),
y_ctr + 0.5 * (hs - 1)
)
)
return anchors
def _whctrs(anchor):
"""Return width, height, x center, and y center for an anchor (window)."""
w = anchor[2] - anchor[0] + 1
h = anchor[3] - anchor[1] + 1
x_ctr = anchor[0] + 0.5 * (w - 1)
y_ctr = anchor[1] + 0.5 * (h - 1)
return w, h, x_ctr, y_ctr
def _ratio_enum(anchor, ratios):
"""Enumerate a set of anchors for each aspect ratio wrt an anchor."""
w, h, x_ctr, y_ctr = _whctrs(anchor)
size = w * h
size_ratios = size / ratios
ws = np.round(np.sqrt(size_ratios))
hs = np.round(ws * ratios)
anchors = _mkanchors(ws, hs, x_ctr, y_ctr)
return anchors
def _generate_anchors(base_size, scales, aspect_ratios):
"""Generate anchor (reference) windows by enumerating aspect ratios X
scales wrt a reference (0, 0, base_size - 1, base_size - 1) window.
"""
anchor = np.array([1, 1, base_size, base_size], dtype=np.float) - 1
anchors = _ratio_enum(anchor, aspect_ratios)
anchors = np.vstack(
[_scale_enum(anchors[i, :], scales) for i in range(anchors.shape[0])]
)
return anchors
def generate_anchors(
stride=16, sizes=(32, 64, 128, 256, 512), aspect_ratios=(0.5, 1, 2)
):
"""Generates a matrix of anchor boxes in (x1, y1, x2, y2) format. Anchors
are centered on stride / 2, have (approximate) sqrt areas of the specified
sizes, and aspect ratios as given.
"""
return _generate_anchors(
stride,
np.array(sizes, dtype=np.float) / stride,
np.array(aspect_ratios, dtype=np.float)
)
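# Illustrative sketch (not part of the original pipeline): with the default 5
# sizes and 3 aspect ratios, generate_anchors enumerates 15 anchors, each in
# (x1, y1, x2, y2) format and centered on stride / 2.
def _example_generate_anchors():
    anchors = generate_anchors(stride=16, sizes=(32, 64, 128, 256, 512),
                               aspect_ratios=(0.5, 1, 2))
    return anchors.shape  # (15, 4)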
def _create_cell_anchors():
"""
Generate all types of anchors for all fpn levels/scales/aspect ratios.
This function is called only once at the beginning of inference.
"""
k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
scales_per_octave = cfg.RETINANET.SCALES_PER_OCTAVE
aspect_ratios = cfg.RETINANET.ASPECT_RATIOS
anchor_scale = cfg.RETINANET.ANCHOR_SCALE
A = scales_per_octave * len(aspect_ratios)
anchors = {}
for lvl in range(k_min, k_max + 1):
# create cell anchors array
stride = 2. ** lvl
cell_anchors = np.zeros((A, 4))
a = 0
for octave in range(scales_per_octave):
octave_scale = 2 ** (octave / float(scales_per_octave))
for aspect in aspect_ratios:
anchor_sizes = (stride * octave_scale * anchor_scale, )
anchor_aspect_ratios = (aspect, )
cell_anchors[a, :] = generate_anchors(
stride=stride, sizes=anchor_sizes,
aspect_ratios=anchor_aspect_ratios)
a += 1
anchors[lvl] = cell_anchors
return anchors
def test_retinanet_im_detect_bbox(model, im, timers=None):
"""Generate RetinaNet detections on a single image."""
if timers is None:
timers = defaultdict(Timer)
# Although anchors are input independent and could be precomputed,
# recomputing them per image only brings a small overhead
anchors = _create_cell_anchors()
timers['im_detect_bbox'].tic()
k_max, k_min = cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.RPN_MIN_LEVEL
A = cfg.RETINANET.SCALES_PER_OCTAVE * len(cfg.RETINANET.ASPECT_RATIOS)
inputs = {}
inputs['data'], im_scale, inputs['im_info'] = \
blob_utils_get_image_blob(im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE)
cls_probs, box_preds = [], []
for lvl in range(k_min, k_max + 1):
suffix = 'fpn{}'.format(lvl)
cls_probs.append(core.ScopedName('retnet_cls_prob_{}'.format(suffix)))
box_preds.append(core.ScopedName('retnet_bbox_pred_{}'.format(suffix)))
for k, v in inputs.items():
workspace.FeedBlob(core.ScopedName(k), v.astype(np.float32, copy=False))
workspace.RunNet(model.net.Proto().name)
cls_probs = workspace.FetchBlobs(cls_probs)
box_preds = workspace.FetchBlobs(box_preds)
# here the boxes_all are [x0, y0, x1, y1, score]
boxes_all = defaultdict(list)
cnt = 0
for lvl in range(k_min, k_max + 1):
# create cell anchors array
stride = 2. ** lvl
cell_anchors = anchors[lvl]
# fetch per level probability
cls_prob = cls_probs[cnt]
box_pred = box_preds[cnt]
cls_prob = cls_prob.reshape((
cls_prob.shape[0], A, int(cls_prob.shape[1] / A),
cls_prob.shape[2], cls_prob.shape[3]))
box_pred = box_pred.reshape((
box_pred.shape[0], A, 4, box_pred.shape[2], box_pred.shape[3]))
cnt += 1
if cfg.RETINANET.SOFTMAX:
cls_prob = cls_prob[:, :, 1::, :, :]
cls_prob_ravel = cls_prob.ravel()
        # In some cases (especially for very small image sizes) candidate_inds
        # can be empty if we impose the 0.05 threshold at all levels, which
        # would leave no detections for this image. Hence, for the coarsest
        # level (which has a small spatial resolution), use a threshold of 0.0.
th = cfg.RETINANET.INFERENCE_TH if lvl < k_max else 0.0
candidate_inds = np.where(cls_prob_ravel > th)[0]
if (len(candidate_inds) == 0):
continue
pre_nms_topn = min(cfg.RETINANET.PRE_NMS_TOP_N, len(candidate_inds))
inds = np.argpartition(
cls_prob_ravel[candidate_inds], -pre_nms_topn)[-pre_nms_topn:]
inds = candidate_inds[inds]
inds_5d = np.array(np.unravel_index(inds, cls_prob.shape)).transpose()
classes = inds_5d[:, 2]
anchor_ids, y, x = inds_5d[:, 1], inds_5d[:, 3], inds_5d[:, 4]
scores = cls_prob[:, anchor_ids, classes, y, x]
boxes = np.column_stack((x, y, x, y)).astype(dtype=np.float32)
boxes *= stride
boxes += cell_anchors[anchor_ids, :]
if not cfg.RETINANET.CLASS_SPECIFIC_BBOX:
box_deltas = box_pred[0, anchor_ids, :, y, x]
else:
box_cls_inds = classes * 4
box_deltas = np.vstack(
[box_pred[0, ind:ind + 4, yi, xi]
for ind, yi, xi in zip(box_cls_inds, y, x)]
)
pred_boxes = (
box_utils_bbox_transform(boxes, box_deltas)
if cfg.TEST.BBOX_REG else boxes)
pred_boxes /= im_scale
pred_boxes = box_utils_clip_tiled_boxes(pred_boxes, im.shape)
box_scores = np.zeros((pred_boxes.shape[0], 5))
box_scores[:, 0:4] = pred_boxes
box_scores[:, 4] = scores
for cls in range(1, cfg.MODEL.NUM_CLASSES):
inds = np.where(classes == cls - 1)[0]
if len(inds) > 0:
boxes_all[cls].extend(box_scores[inds, :])
timers['im_detect_bbox'].toc()
# Combine predictions across all levels and retain the top scoring by class
timers['misc_bbox'].tic()
detections = []
for cls, boxes in boxes_all.items():
cls_dets = np.vstack(boxes).astype(dtype=np.float32)
# do class specific nms here
keep = box_utils_nms(cls_dets, cfg.TEST.NMS)
cls_dets = cls_dets[keep, :]
out = np.zeros((len(keep), 6))
out[:, 0:5] = cls_dets
out[:, 5].fill(cls)
detections.append(out)
# detections (N, 6) format:
# detections[:, :4] - boxes
# detections[:, 4] - scores
# detections[:, 5] - classes
detections = np.vstack(detections)
# sort all again
inds = np.argsort(-detections[:, 4])
detections = detections[inds[0:cfg.TEST.DETECTIONS_PER_IM], :]
# Convert the detections to image cls_ format (see core/test_engine.py)
num_classes = cfg.MODEL.NUM_CLASSES
cls_boxes = [[] for _ in range(cfg.MODEL.NUM_CLASSES)]
for c in range(1, num_classes):
inds = np.where(detections[:, 5] == c)[0]
cls_boxes[c] = detections[inds, :5]
timers['misc_bbox'].toc()
return cls_boxes
def infer_engine_im_detect_all(model, im, box_proposals, timers=None):
if timers is None:
timers = defaultdict(Timer)
# Handle RetinaNet testing separately for now
if cfg.RETINANET.RETINANET_ON:
cls_boxes = test_retinanet_im_detect_bbox(model, im, timers)
return cls_boxes, None, None
timers['im_detect_bbox'].tic()
if cfg.TEST.BBOX_AUG.ENABLED:
scores, boxes, im_scale = im_detect_bbox_aug(model, im, box_proposals)
else:
scores, boxes, im_scale = im_detect_bbox(
model, im, cfg.TEST.SCALE, cfg.TEST.MAX_SIZE, boxes=box_proposals
)
timers['im_detect_bbox'].toc()
    # scores and boxes are from the whole image after score thresholding and NMS
    # (they are not separated by class); cls_boxes contains boxes and scores
    # separated by class, in the format used for evaluating results
timers['misc_bbox'].tic()
scores, boxes, cls_boxes = box_results_with_nms_and_limit(scores, boxes)
timers['misc_bbox'].toc()
if cfg.MODEL.MASK_ON and boxes.shape[0] > 0:
timers['im_detect_mask'].tic()
if cfg.TEST.MASK_AUG.ENABLED:
masks = im_detect_mask_aug(model, im, boxes)
else:
masks = im_detect_mask(model, im_scale, boxes)
timers['im_detect_mask'].toc()
timers['misc_mask'].tic()
cls_segms = segm_results(
cls_boxes, masks, boxes, im.shape[0], im.shape[1]
)
timers['misc_mask'].toc()
else:
cls_segms = None
if cfg.MODEL.KEYPOINTS_ON and boxes.shape[0] > 0:
timers['im_detect_keypoints'].tic()
if cfg.TEST.KPS_AUG.ENABLED:
heatmaps = im_detect_keypoints_aug(model, im, boxes)
else:
heatmaps = im_detect_keypoints(model, im_scale, boxes)
timers['im_detect_keypoints'].toc()
timers['misc_keypoints'].tic()
cls_keyps = keypoint_results(cls_boxes, heatmaps, boxes)
timers['misc_keypoints'].toc()
else:
cls_keyps = None
if cfg.MODEL.BODY_UV_ON and boxes.shape[0] > 0:
timers['im_detect_body_uv'].tic()
cls_bodys = im_detect_body_uv(model, im_scale, boxes)
timers['im_detect_body_uv'].toc()
else:
cls_bodys = None
return cls_boxes, cls_segms, cls_keyps, cls_bodys
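# Illustrative sketch (not part of the original pipeline): a hypothetical
# single-image driver showing how the pieces above fit together. It assumes a
# fully built, weight-loaded Caffe2 detection model, an initialized config with
# cfg.MODEL.BODY_UV_ON enabled, and placeholder names `model` and `im`.
def _example_infer_and_visualize(model, im):
    cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine_im_detect_all(
        model, im, box_proposals=None
    )
    # Returns the composited IUV image (or None if no box passes the threshold)
    return vis_utils_vis_one_image(
        im, cls_boxes, cls_segms, cls_keyps, body_uv=cls_bodys, thresh=0.7
    )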
def model_builder_add_inference_inputs(model):
"""Create network input blobs used for inference."""
def create_input_blobs_for_net(net_def):
for op in net_def.op:
for blob_in in op.input:
if not workspace.HasBlob(blob_in):
workspace.CreateBlob(blob_in)
create_input_blobs_for_net(model.net.Proto())
if cfg.MODEL.MASK_ON:
create_input_blobs_for_net(model.mask_net.Proto())
if cfg.MODEL.KEYPOINTS_ON:
create_input_blobs_for_net(model.keypoint_net.Proto())
if cfg.MODEL.BODY_UV_ON:
create_input_blobs_for_net(model.body_uv_net.Proto())
def add_single_gpu_param_update_ops(model, gpu_id):
# Learning rate of 0 is a dummy value to be set properly at the
# start of training
lr = model.param_init_net.ConstantFill(
[], 'lr', shape=[1], value=0.0
)
one = model.param_init_net.ConstantFill(
[], 'one', shape=[1], value=1.0
)
wd = model.param_init_net.ConstantFill(
[], 'wd', shape=[1], value=cfg.SOLVER.WEIGHT_DECAY
)
# weight decay of GroupNorm's parameters
wd_gn = model.param_init_net.ConstantFill(
[], 'wd_gn', shape=[1], value=cfg.SOLVER.WEIGHT_DECAY_GN
)
for param in model.TrainableParams(gpu_id=gpu_id):
logger.debug('param ' + str(param) + ' will be updated')
param_grad = model.param_to_grad[param]
# Initialize momentum vector
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
if param in model.biases:
# Special treatment for biases (mainly to match historical impl.
# details):
# (1) Do not apply weight decay
# (2) Use a 2x higher learning rate
model.Scale(param_grad, param_grad, scale=2.0)
elif param in model.gn_params:
# Special treatment for GroupNorm's parameters
model.WeightedSum([param_grad, one, param, wd_gn], param_grad)
elif cfg.SOLVER.WEIGHT_DECAY > 0:
# Apply weight decay to non-bias weights
model.WeightedSum([param_grad, one, param, wd], param_grad)
# Update param_grad and param_momentum in place
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, lr, param],
[param_grad, param_momentum, param],
momentum=cfg.SOLVER.MOMENTUM
)
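# Illustrative sketch (not part of the original pipeline): a plain-NumPy view
# of the parameter update that MomentumSGDUpdate applies above, assuming the
# standard (non-Nesterov) momentum formulation; the Caffe2 op's exact in-place
# semantics are not reproduced here.
def _example_momentum_sgd_step(param, grad, momentum_buf, lr, momentum=0.9):
    momentum_buf = momentum * momentum_buf + lr * grad
    param = param - momentum_buf
    return param, momentum_buf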
def _add_allreduce_graph(model):
"""Construct the graph that performs Allreduce on the gradients."""
# Need to all-reduce the per-GPU gradients if training with more than 1 GPU
all_params = model.TrainableParams()
assert len(all_params) % cfg.NUM_GPUS == 0
# The model parameters are replicated on each GPU, get the number
# distinct parameter blobs (i.e., the number of parameter blobs on
# each GPU)
params_per_gpu = int(len(all_params) / cfg.NUM_GPUS)
with c2_utils_CudaScope(0):
# Iterate over distinct parameter blobs
for i in range(params_per_gpu):
# Gradients from all GPUs for this parameter blob
gradients = [
model.param_to_grad[p] for p in all_params[i::params_per_gpu]
]
if len(gradients) > 0:
if cfg.USE_NCCL:
model.net.NCCLAllreduce(gradients, gradients)
else:
muji.Allreduce(model.net, gradients, reduced_affix='')
def _build_forward_graph(model, single_gpu_build_func):
"""Construct the forward graph on each GPU."""
all_loss_gradients = {} # Will include loss gradients from all GPUs
# Build the model on each GPU with correct name and device scoping
for gpu_id in range(cfg.NUM_GPUS):
with c2_utils_NamedCudaScope(gpu_id):
all_loss_gradients.update(single_gpu_build_func(model))
return all_loss_gradients
def optim_build_data_parallel_model(model, single_gpu_build_func):
"""Build a data parallel model given a function that builds the model on a
single GPU.
"""
if model.only_build_forward_pass:
single_gpu_build_func(model)
elif model.train:
all_loss_gradients = _build_forward_graph(model, single_gpu_build_func)
# Add backward pass on all GPUs
model.AddGradientOperators(all_loss_gradients)
if cfg.NUM_GPUS > 1:
_add_allreduce_graph(model)
for gpu_id in range(cfg.NUM_GPUS):
# After allreduce, all GPUs perform SGD updates on their identical
# params and gradients in parallel
with c2_utils_NamedCudaScope(gpu_id):
add_single_gpu_param_update_ops(model, gpu_id)
else:
# Test-time network operates on single GPU
# Test-time parallelism is implemented through multiprocessing
with c2_utils_NamedCudaScope(model.target_gpu_id):
single_gpu_build_func(model)
def body_uv_rcnn_heads_add_body_uv_losses(model, pref=''):
## Reshape for GT blobs.
model.net.Reshape( ['body_uv_X_points'], ['X_points_reshaped'+pref, 'X_points_shape'+pref], shape=( -1 ,1 ) )
model.net.Reshape( ['body_uv_Y_points'], ['Y_points_reshaped'+pref, 'Y_points_shape'+pref], shape=( -1 ,1 ) )
model.net.Reshape( ['body_uv_I_points'], ['I_points_reshaped'+pref, 'I_points_shape'+pref], shape=( -1 ,1 ) )
model.net.Reshape( ['body_uv_Ind_points'], ['Ind_points_reshaped'+pref, 'Ind_points_shape'+pref], shape=( -1 ,1 ) )
## Concat Ind,x,y to get Coordinates blob.
model.net.Concat( ['Ind_points_reshaped'+pref,'X_points_reshaped'+pref, \
'Y_points_reshaped'+pref],['Coordinates'+pref,'Coordinate_Shapes'+pref ], axis = 1 )
    ### Reshape the UV blobs so that they are 1 x 1 x (196*NumSamples) x (NUM_PATCHES+1)
    ## U blob
model.net.Reshape(['body_uv_U_points'], \
['U_points_reshaped'+pref, 'U_points_old_shape'+pref],\
shape=(-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1,196))
model.net.Transpose(['U_points_reshaped'+pref] ,['U_points_reshaped_transpose'+pref],axes=(0,2,1) )
model.net.Reshape(['U_points_reshaped_transpose'+pref], \
['U_points'+pref, 'U_points_old_shape2'+pref], \
shape=(1,1,-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1))
## V blob
##
model.net.Reshape(['body_uv_V_points'], \
['V_points_reshaped'+pref, 'V_points_old_shape'+pref],\
shape=(-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1,196))
model.net.Transpose(['V_points_reshaped'+pref] ,['V_points_reshaped_transpose'+pref],axes=(0,2,1) )
model.net.Reshape(['V_points_reshaped_transpose'+pref], \
['V_points'+pref, 'V_points_old_shape2'+pref], \
shape=(1,1,-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1))
###
## UV weights blob
##
model.net.Reshape(['body_uv_point_weights'], \
['Uv_point_weights_reshaped'+pref, 'Uv_point_weights_old_shape'+pref],\
shape=(-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1,196))
model.net.Transpose(['Uv_point_weights_reshaped'+pref] ,['Uv_point_weights_reshaped_transpose'+pref],axes=(0,2,1) )
model.net.Reshape(['Uv_point_weights_reshaped_transpose'+pref], \
['Uv_point_weights'+pref, 'Uv_point_weights_old_shape2'+pref], \
shape=(1,1,-1,cfg.BODY_UV_RCNN.NUM_PATCHES+1))
#####################
### Pool IUV for points via bilinear interpolation.
model.PoolPointsInterp(['U_estimated','Coordinates'+pref], ['interp_U'+pref])
model.PoolPointsInterp(['V_estimated','Coordinates'+pref], ['interp_V'+pref])
model.PoolPointsInterp(['Index_UV'+pref,'Coordinates'+pref], ['interp_Index_UV'+pref])
## Reshape interpolated UV coordinates to apply the loss.
model.net.Reshape(['interp_U'+pref], \
['interp_U_reshaped'+pref, 'interp_U_shape'+pref],\
shape=(1, 1, -1 , cfg.BODY_UV_RCNN.NUM_PATCHES+1))
model.net.Reshape(['interp_V'+pref], \
['interp_V_reshaped'+pref, 'interp_V_shape'+pref],\
shape=(1, 1, -1 , cfg.BODY_UV_RCNN.NUM_PATCHES+1))
###
### Do the actual labels here !!!!
model.net.Reshape( ['body_uv_ann_labels'], \
['body_uv_ann_labels_reshaped' +pref, 'body_uv_ann_labels_old_shape'+pref], \
shape=(-1, cfg.BODY_UV_RCNN.HEATMAP_SIZE , cfg.BODY_UV_RCNN.HEATMAP_SIZE))
model.net.Reshape( ['body_uv_ann_weights'], \
['body_uv_ann_weights_reshaped' +pref, 'body_uv_ann_weights_old_shape'+pref], \
shape=( -1 , cfg.BODY_UV_RCNN.HEATMAP_SIZE , cfg.BODY_UV_RCNN.HEATMAP_SIZE))
###
model.net.Cast( ['I_points_reshaped'+pref], ['I_points_reshaped_int'+pref], to=core.DataType.INT32)
### Now add the actual losses
## The mask segmentation loss (dense)
probs_seg_AnnIndex, loss_seg_AnnIndex = model.net.SpatialSoftmaxWithLoss( \
['AnnIndex'+pref, 'body_uv_ann_labels_reshaped'+pref,'body_uv_ann_weights_reshaped'+pref],\
['probs_seg_AnnIndex'+pref,'loss_seg_AnnIndex'+pref], \
scale=cfg.BODY_UV_RCNN.INDEX_WEIGHTS / cfg.NUM_GPUS)
## Point Patch Index Loss.
probs_IndexUVPoints, loss_IndexUVPoints = model.net.SoftmaxWithLoss(\
['interp_Index_UV'+pref,'I_points_reshaped_int'+pref],\
['probs_IndexUVPoints'+pref,'loss_IndexUVPoints'+pref], \
scale=cfg.BODY_UV_RCNN.PART_WEIGHTS / cfg.NUM_GPUS, spatial=0)
## U and V point losses.
loss_Upoints = model.net.SmoothL1Loss( \
['interp_U_reshaped'+pref, 'U_points'+pref, \
'Uv_point_weights'+pref, 'Uv_point_weights'+pref], \
'loss_Upoints'+pref, \
scale=cfg.BODY_UV_RCNN.POINT_REGRESSION_WEIGHTS / cfg.NUM_GPUS)
loss_Vpoints = model.net.SmoothL1Loss( \
['interp_V_reshaped'+pref, 'V_points'+pref, \
'Uv_point_weights'+pref, 'Uv_point_weights'+pref], \
'loss_Vpoints'+pref, scale=cfg.BODY_UV_RCNN.POINT_REGRESSION_WEIGHTS / cfg.NUM_GPUS)
## Add the losses.
loss_gradients = blob_utils_get_loss_gradients(model, \
[ loss_Upoints, loss_Vpoints, loss_seg_AnnIndex, loss_IndexUVPoints])
model.losses = list(set(model.losses + \
['loss_Upoints'+pref , 'loss_Vpoints'+pref , \
'loss_seg_AnnIndex'+pref ,'loss_IndexUVPoints'+pref]))
return loss_gradients
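# Illustrative sketch (NumPy only, not used by the model): the Reshape ->
# Transpose -> Reshape chain above rearranges the U/V point targets from
# (N RoIs, P patch channels, 196 points) into the (1, 1, N * 196, P) layout fed
# to SmoothL1Loss. The sizes below are hypothetical; P stands for
# cfg.BODY_UV_RCNN.NUM_PATCHES + 1.
def _example_uv_target_layout():
    import numpy as np
    n_rois, p = 3, 25  # hypothetical values
    u = np.arange(n_rois * p * 196, dtype=np.float32)
    u = u.reshape(n_rois, p, 196)   # Reshape
    u = u.transpose(0, 2, 1)        # Transpose, axes=(0, 2, 1)
    return u.reshape(1, 1, -1, p)   # -> shape (1, 1, 588, 25)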
def body_uv_rcnn_heads_add_body_uv_outputs(model, blob_in, dim, pref=''):
    #### Annotation index (coarse segmentation) head; 15 = 14 body parts + background
    model.ConvTranspose(
        blob_in, 'AnnIndex_lowres' + pref, dim, 15, cfg.BODY_UV_RCNN.DECONV_KERNEL,
        pad=int(cfg.BODY_UV_RCNN.DECONV_KERNEL / 2 - 1), stride=2,
        weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.001}),
        bias_init=('ConstantFill', {'value': 0.}))
    #### Patch index head
    model.ConvTranspose(
        blob_in, 'Index_UV_lowres' + pref, dim, cfg.BODY_UV_RCNN.NUM_PATCHES + 1,
        cfg.BODY_UV_RCNN.DECONV_KERNEL,
        pad=int(cfg.BODY_UV_RCNN.DECONV_KERNEL / 2 - 1), stride=2,
        weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.001}),
        bias_init=('ConstantFill', {'value': 0.}))
####
model.ConvTranspose(
blob_in, 'U_lowres'+pref, dim, (cfg.BODY_UV_RCNN.NUM_PATCHES+1),
cfg.BODY_UV_RCNN.DECONV_KERNEL,
pad=int(cfg.BODY_UV_RCNN.DECONV_KERNEL / 2 - 1),
stride=2,
weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.001}),
bias_init=('ConstantFill', {'value': 0.}))
#####
model.ConvTranspose(
blob_in, 'V_lowres'+pref, dim, cfg.BODY_UV_RCNN.NUM_PATCHES+1,
cfg.BODY_UV_RCNN.DECONV_KERNEL,
pad=int(cfg.BODY_UV_RCNN.DECONV_KERNEL / 2 - 1),
stride=2,
weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.001}),
bias_init=('ConstantFill', {'value': 0.}))
####
    blob_Ann_Index = model.BilinearInterpolation(
        'AnnIndex_lowres' + pref, 'AnnIndex' + pref,
        cfg.BODY_UV_RCNN.NUM_PATCHES + 1, cfg.BODY_UV_RCNN.NUM_PATCHES + 1,
        cfg.BODY_UV_RCNN.UP_SCALE)
    blob_Index = model.BilinearInterpolation(
        'Index_UV_lowres' + pref, 'Index_UV' + pref,
        cfg.BODY_UV_RCNN.NUM_PATCHES + 1, cfg.BODY_UV_RCNN.NUM_PATCHES + 1,
        cfg.BODY_UV_RCNN.UP_SCALE)
    blob_U = model.BilinearInterpolation(
        'U_lowres' + pref, 'U_estimated' + pref,
        cfg.BODY_UV_RCNN.NUM_PATCHES + 1, cfg.BODY_UV_RCNN.NUM_PATCHES + 1,
        cfg.BODY_UV_RCNN.UP_SCALE)
    blob_V = model.BilinearInterpolation(
        'V_lowres' + pref, 'V_estimated' + pref,
        cfg.BODY_UV_RCNN.NUM_PATCHES + 1, cfg.BODY_UV_RCNN.NUM_PATCHES + 1,
        cfg.BODY_UV_RCNN.UP_SCALE)
    ###
    return blob_U, blob_V, blob_Index, blob_Ann_Index
def _add_roi_body_uv_head(
model, add_roi_body_uv_head_func, blob_in, dim_in, spatial_scale_in
):
"""Add a body UV prediction head to the model."""
    # Capture model graph before adding the body UV head
bbox_net = copy.deepcopy(model.net.Proto())
# Add the body UV head
blob_body_uv_head, dim_body_uv_head = add_roi_body_uv_head_func(
model, blob_in, dim_in, spatial_scale_in
)
# Add the body UV output
blobs_body_uv = body_uv_rcnn_heads_add_body_uv_outputs(
model, blob_body_uv_head, dim_body_uv_head
)
if not model.train: # == inference
        # Inference uses a cascade of box predictions, then body UV predictions.
        # This requires separate nets for box and body UV prediction.
        # So we extract the body UV prediction net, store it as its own
        # network, then restore model.net to be the bbox-only network
model.body_uv_net, body_uv_blob_out = c2_utils_SuffixNet(
'body_uv_net', model.net, len(bbox_net.op), blobs_body_uv
)
model.net._net = bbox_net
loss_gradients = None
else:
loss_gradients = body_uv_rcnn_heads_add_body_uv_losses(model)
return loss_gradients
def keypoint_rcnn_heads_add_keypoint_losses(model):
"""Add Mask R-CNN keypoint specific losses."""
# Reshape input from (N, K, H, W) to (NK, HW)
model.net.Reshape(
['kps_score'], ['kps_score_reshaped', '_kps_score_old_shape'],
shape=(-1, cfg.KRCNN.HEATMAP_SIZE * cfg.KRCNN.HEATMAP_SIZE)
)
# Softmax across **space** (woahh....space!)
# Note: this is not what is commonly called "spatial softmax"
# (i.e., softmax applied along the channel dimension at each spatial
# location); This is softmax applied over a set of spatial locations (i.e.,
# each spatial location is a "class").
kps_prob, loss_kps = model.net.SoftmaxWithLoss(
['kps_score_reshaped', 'keypoint_locations_int32', 'keypoint_weights'],
['kps_prob', 'loss_kps'],
scale=cfg.KRCNN.LOSS_WEIGHT / cfg.NUM_GPUS,
spatial=0
)
if not cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS:
# Discussion: the softmax loss above will average the loss by the sum of
# keypoint_weights, i.e. the total number of visible keypoints. Since
# the number of visible keypoints can vary significantly between
# minibatches, this has the effect of up-weighting the importance of
# minibatches with few visible keypoints. (Imagine the extreme case of
# only one visible keypoint versus N: in the case of N, each one
# contributes 1/N to the gradient compared to the single keypoint
# determining the gradient direction). Instead, we can normalize the
# loss by the total number of keypoints, if it were the case that all
# keypoints were visible in a full minibatch. (Returning to the example,
# this means that the one visible keypoint contributes as much as each
# of the N keypoints.)
model.StopGradient(
'keypoint_loss_normalizer', 'keypoint_loss_normalizer'
)
loss_kps = model.net.Mul(
['loss_kps', 'keypoint_loss_normalizer'], 'loss_kps_normalized'
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_kps])
model.AddLosses(loss_kps)
return loss_gradients
def keypoint_rcnn_heads_add_keypoint_outputs(model, blob_in, dim):
"""Add Mask R-CNN keypoint specific outputs: keypoint heatmaps."""
# NxKxHxW
upsample_heatmap = (cfg.KRCNN.UP_SCALE > 1)
if cfg.KRCNN.USE_DECONV:
# Apply ConvTranspose to the feature representation; results in 2x
# upsampling
blob_in = model.ConvTranspose(
blob_in,
'kps_deconv',
dim,
cfg.KRCNN.DECONV_DIM,
kernel=cfg.KRCNN.DECONV_KERNEL,
pad=int(cfg.KRCNN.DECONV_KERNEL / 2 - 1),
stride=2,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
model.Relu('kps_deconv', 'kps_deconv')
dim = cfg.KRCNN.DECONV_DIM
if upsample_heatmap:
blob_name = 'kps_score_lowres'
else:
blob_name = 'kps_score'
if cfg.KRCNN.USE_DECONV_OUTPUT:
# Use ConvTranspose to predict heatmaps; results in 2x upsampling
blob_out = model.ConvTranspose(
blob_in,
blob_name,
dim,
cfg.KRCNN.NUM_KEYPOINTS,
kernel=cfg.KRCNN.DECONV_KERNEL,
pad=int(cfg.KRCNN.DECONV_KERNEL / 2 - 1),
stride=2,
weight_init=(cfg.KRCNN.CONV_INIT, {'std': 0.001}),
bias_init=const_fill(0.0)
)
else:
# Use Conv to predict heatmaps; does no upsampling
blob_out = model.Conv(
blob_in,
blob_name,
dim,
cfg.KRCNN.NUM_KEYPOINTS,
kernel=1,
pad=0,
stride=1,
weight_init=(cfg.KRCNN.CONV_INIT, {'std': 0.001}),
bias_init=const_fill(0.0)
)
if upsample_heatmap:
# Increase heatmap output size via bilinear upsampling
blob_out = model.BilinearInterpolation(
blob_out, 'kps_score', cfg.KRCNN.NUM_KEYPOINTS,
cfg.KRCNN.NUM_KEYPOINTS, cfg.KRCNN.UP_SCALE
)
return blob_out
def _add_roi_keypoint_head(
model, add_roi_keypoint_head_func, blob_in, dim_in, spatial_scale_in
):
"""Add a keypoint prediction head to the model."""
    # Capture model graph before adding the keypoint head
bbox_net = copy.deepcopy(model.net.Proto())
# Add the keypoint head
blob_keypoint_head, dim_keypoint_head = add_roi_keypoint_head_func(
model, blob_in, dim_in, spatial_scale_in
)
# Add the keypoint output
blob_keypoint = keypoint_rcnn_heads_add_keypoint_outputs(
model, blob_keypoint_head, dim_keypoint_head
)
if not model.train: # == inference
# Inference uses a cascade of box predictions, then keypoint predictions
# This requires separate nets for box and keypoint prediction.
# So we extract the keypoint prediction net, store it as its own
# network, then restore model.net to be the bbox-only network
model.keypoint_net, keypoint_blob_out = c2_utils_SuffixNet(
'keypoint_net', model.net, len(bbox_net.op), blob_keypoint
)
model.net._net = bbox_net
loss_gradients = None
else:
loss_gradients = keypoint_rcnn_heads_add_keypoint_losses(model)
return loss_gradients
def mask_rcnn_heads_add_mask_rcnn_losses(model, blob_mask):
"""Add Mask R-CNN specific losses."""
loss_mask = model.net.SigmoidCrossEntropyLoss(
[blob_mask, 'masks_int32'],
'loss_mask',
scale=model.GetLossScale() * cfg.MRCNN.WEIGHT_LOSS_MASK
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_mask])
model.AddLosses('loss_mask')
return loss_gradients
def BlobReferenceList(blob_ref_or_list):
"""Ensure that the argument is returned as a list of BlobReferences."""
if isinstance(blob_ref_or_list, core.BlobReference):
return [blob_ref_or_list]
elif type(blob_ref_or_list) in (list, tuple):
for b in blob_ref_or_list:
assert isinstance(b, core.BlobReference)
return blob_ref_or_list
else:
raise TypeError(
'blob_ref_or_list must be a BlobReference or a list/tuple of '
'BlobReferences'
)
def c2_utils_SuffixNet(name, net, prefix_len, outputs):
"""Returns a new Net from the given Net (`net`) that includes only the ops
after removing the first `prefix_len` number of ops. The new Net is thus a
suffix of `net`. Blobs listed in `outputs` are registered as external output
blobs.
"""
outputs = BlobReferenceList(outputs)
for output in outputs:
assert net.BlobIsDefined(output)
new_net = net.Clone(name)
del new_net.Proto().op[:]
del new_net.Proto().external_input[:]
del new_net.Proto().external_output[:]
# Add suffix ops
new_net.Proto().op.extend(net.Proto().op[prefix_len:])
# Add external input blobs
# Treat any undefined blobs as external inputs
input_names = [
i for op in new_net.Proto().op for i in op.input
if not new_net.BlobIsDefined(i)]
new_net.Proto().external_input.extend(input_names)
# Add external output blobs
output_names = [str(o) for o in outputs]
new_net.Proto().external_output.extend(output_names)
return new_net, [new_net.GetBlobRef(o) for o in output_names]
def mask_rcnn_heads_add_mask_rcnn_outputs(model, blob_in, dim):
"""Add Mask R-CNN specific outputs: either mask logits or probs."""
num_cls = cfg.MODEL.NUM_CLASSES if cfg.MRCNN.CLS_SPECIFIC_MASK else 1
if cfg.MRCNN.USE_FC_OUTPUT:
# Predict masks with a fully connected layer (ignore 'fcn' in the blob
# name)
blob_out = model.FC(
blob_in,
'mask_fcn_logits',
dim,
num_cls * cfg.MRCNN.RESOLUTION**2,
weight_init=gauss_fill(0.001),
bias_init=const_fill(0.0)
)
else:
# Predict mask using Conv
# Use GaussianFill for class-agnostic mask prediction; fills based on
# fan-in can be too large in this case and cause divergence
fill = (
cfg.MRCNN.CONV_INIT
if cfg.MRCNN.CLS_SPECIFIC_MASK else 'GaussianFill'
)
blob_out = model.Conv(
blob_in,
'mask_fcn_logits',
dim,
num_cls,
kernel=1,
pad=0,
stride=1,
weight_init=(fill, {'std': 0.001}),
bias_init=const_fill(0.0)
)
if cfg.MRCNN.UPSAMPLE_RATIO > 1:
blob_out = model.BilinearInterpolation(
'mask_fcn_logits', 'mask_fcn_logits_up', num_cls, num_cls,
cfg.MRCNN.UPSAMPLE_RATIO
)
if not model.train: # == if test
blob_out = model.net.Sigmoid(blob_out, 'mask_fcn_probs')
return blob_out
def _add_roi_mask_head(
model, add_roi_mask_head_func, blob_in, dim_in, spatial_scale_in
):
"""Add a mask prediction head to the model."""
# Capture model graph before adding the mask head
bbox_net = copy.deepcopy(model.net.Proto())
# Add the mask head
blob_mask_head, dim_mask_head = add_roi_mask_head_func(
model, blob_in, dim_in, spatial_scale_in
)
# Add the mask output
blob_mask = mask_rcnn_heads_add_mask_rcnn_outputs(
model, blob_mask_head, dim_mask_head
)
if not model.train: # == inference
# Inference uses a cascade of box predictions, then mask predictions.
# This requires separate nets for box and mask prediction.
# So we extract the mask prediction net, store it as its own network,
# then restore model.net to be the bbox-only network
model.mask_net, blob_mask = c2_utils_SuffixNet(
'mask_net', model.net, len(bbox_net.op), blob_mask
)
model.net._net = bbox_net
loss_gradients = None
else:
loss_gradients = mask_rcnn_heads_add_mask_rcnn_losses(model, blob_mask)
return loss_gradients
def fast_rcnn_heads_add_fast_rcnn_losses(model):
"""Add losses for RoI classification and bounding box regression."""
cls_prob, loss_cls = model.net.SoftmaxWithLoss(
['cls_score', 'labels_int32'], ['cls_prob', 'loss_cls'],
scale=model.GetLossScale()
)
loss_bbox = model.net.SmoothL1Loss(
[
'bbox_pred', 'bbox_targets', 'bbox_inside_weights',
'bbox_outside_weights'
],
'loss_bbox',
scale=model.GetLossScale()
)
loss_gradients = blob_utils_get_loss_gradients(model, [loss_cls, loss_bbox])
model.Accuracy(['cls_prob', 'labels_int32'], 'accuracy_cls')
model.AddLosses(['loss_cls', 'loss_bbox'])
model.AddMetrics('accuracy_cls')
return loss_gradients
def fast_rcnn_heads_add_fast_rcnn_outputs(model, blob_in, dim):
"""Add RoI classification and bounding box regression output ops."""
# Box classification layer
model.FC(
blob_in,
'cls_score',
dim,
model.num_classes,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
if not model.train: # == if test
# Only add softmax when testing; during training the softmax is combined
# with the label cross entropy loss for numerical stability
model.Softmax('cls_score', 'cls_prob', engine='CUDNN')
# Box regression layer
num_bbox_reg_classes = (
2 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else model.num_classes
)
model.FC(
blob_in,
'bbox_pred',
dim,
num_bbox_reg_classes * 4,
weight_init=gauss_fill(0.001),
bias_init=const_fill(0.0)
)
def _add_fast_rcnn_head(
model, add_roi_box_head_func, blob_in, dim_in, spatial_scale_in
):
"""Add a Fast R-CNN head to the model."""
blob_frcn, dim_frcn = add_roi_box_head_func(
model, blob_in, dim_in, spatial_scale_in
)
fast_rcnn_heads_add_fast_rcnn_outputs(model, blob_frcn, dim_frcn)
if model.train:
loss_gradients = fast_rcnn_heads_add_fast_rcnn_losses(model)
else:
loss_gradients = None
return loss_gradients
def _narrow_to_fpn_roi_levels(blobs, spatial_scales):
"""Return only the blobs and spatial scales that will be used for RoI heads.
Inputs `blobs` and `spatial_scales` may include extra blobs and scales that
are used for RPN proposals, but not for RoI heads.
"""
# Code only supports case when RPN and ROI min levels are the same
assert cfg.FPN.RPN_MIN_LEVEL == cfg.FPN.ROI_MIN_LEVEL
    # RPN max level can be >= the RoI max level
assert cfg.FPN.RPN_MAX_LEVEL >= cfg.FPN.ROI_MAX_LEVEL
# FPN RPN max level might be > FPN ROI max level in which case we
# need to discard some leading conv blobs (blobs are ordered from
# max/coarsest level to min/finest level)
num_roi_levels = cfg.FPN.ROI_MAX_LEVEL - cfg.FPN.ROI_MIN_LEVEL + 1
return blobs[-num_roi_levels:], spatial_scales[-num_roi_levels:]
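# Illustrative sketch (hypothetical level ranges, not read from cfg): with RPN
# levels 2-6 and RoI levels 2-5, the conv blobs arrive ordered from coarsest to
# finest ([fpn6, fpn5, fpn4, fpn3, fpn2]) and only the last four are kept for
# the RoI heads, mirroring the slicing in _narrow_to_fpn_roi_levels above.
def _example_narrow_to_roi_levels():
    blobs = ['fpn6', 'fpn5', 'fpn4', 'fpn3', 'fpn2']
    spatial_scales = [1. / 64., 1. / 32., 1. / 16., 1. / 8., 1. / 4.]
    roi_min_level, roi_max_level = 2, 5
    num_roi_levels = roi_max_level - roi_min_level + 1  # 4
    return blobs[-num_roi_levels:], spatial_scales[-num_roi_levels:]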
def add_single_scale_rpn_losses(model):
"""Add losses for a single scale RPN model (i.e., no FPN)."""
# Spatially narrow the full-sized RPN label arrays to match the feature map
# shape
model.net.SpatialNarrowAs(
['rpn_labels_int32_wide', 'rpn_cls_logits'], 'rpn_labels_int32'
)
for key in ('targets', 'inside_weights', 'outside_weights'):
model.net.SpatialNarrowAs(
['rpn_bbox_' + key + '_wide', 'rpn_bbox_pred'], 'rpn_bbox_' + key
)
loss_rpn_cls = model.net.SigmoidCrossEntropyLoss(
['rpn_cls_logits', 'rpn_labels_int32'],
'loss_rpn_cls',
scale=model.GetLossScale()
)
loss_rpn_bbox = model.net.SmoothL1Loss(
[
'rpn_bbox_pred', 'rpn_bbox_targets', 'rpn_bbox_inside_weights',
'rpn_bbox_outside_weights'
],
'loss_rpn_bbox',
beta=1. / 9.,
scale=model.GetLossScale()
)
loss_gradients = blob_utils_get_loss_gradients(
model, [loss_rpn_cls, loss_rpn_bbox]
)
model.AddLosses(['loss_rpn_cls', 'loss_rpn_bbox'])
return loss_gradients
def add_single_scale_rpn_outputs(model, blob_in, dim_in, spatial_scale):
"""Add RPN outputs to a single scale model (i.e., no FPN)."""
anchors = generate_anchors(
stride=1. / spatial_scale,
sizes=cfg.RPN.SIZES,
aspect_ratios=cfg.RPN.ASPECT_RATIOS
)
num_anchors = anchors.shape[0]
dim_out = dim_in
# RPN hidden representation
model.Conv(
blob_in,
'conv_rpn',
dim_in,
dim_out,
kernel=3,
pad=1,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
model.Relu('conv_rpn', 'conv_rpn')
# Proposal classification scores
model.Conv(
'conv_rpn',
'rpn_cls_logits',
dim_in,
num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
# Proposal bbox regression deltas
model.Conv(
'conv_rpn',
'rpn_bbox_pred',
dim_in,
4 * num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
if not model.train or cfg.MODEL.FASTER_RCNN:
# Proposals are needed during:
# 1) inference (== not model.train) for RPN only and Faster R-CNN
# OR
# 2) training for Faster R-CNN
# Otherwise (== training for RPN only), proposals are not needed
model.net.Sigmoid('rpn_cls_logits', 'rpn_cls_probs')
model.GenerateProposals(
['rpn_cls_probs', 'rpn_bbox_pred', 'im_info'],
['rpn_rois', 'rpn_roi_probs'],
anchors=anchors,
spatial_scale=spatial_scale
)
if cfg.MODEL.FASTER_RCNN:
if model.train:
# Add op that generates training labels for in-network RPN proposals
model.GenerateProposalLabels(['rpn_rois', 'roidb', 'im_info'])
else:
# Alias rois to rpn_rois for inference
model.net.Alias('rpn_rois', 'rois')
def blob_utils_get_loss_gradients(model, loss_blobs):
"""Generate a gradient of 1 for each loss specified in 'loss_blobs'"""
loss_gradients = {}
for b in loss_blobs:
loss_grad = model.net.ConstantFill(b, [b + '_grad'], value=1.0)
loss_gradients[str(b)] = str(loss_grad)
return loss_gradients
def FPN_add_fpn_rpn_losses(model):
"""Add RPN on FPN specific losses."""
loss_gradients = {}
for lvl in range(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.RPN_MAX_LEVEL + 1):
slvl = str(lvl)
# Spatially narrow the full-sized RPN label arrays to match the feature map
# shape
model.net.SpatialNarrowAs(
['rpn_labels_int32_wide_fpn' + slvl, 'rpn_cls_logits_fpn' + slvl],
'rpn_labels_int32_fpn' + slvl
)
for key in ('targets', 'inside_weights', 'outside_weights'):
model.net.SpatialNarrowAs(
[
'rpn_bbox_' + key + '_wide_fpn' + slvl,
'rpn_bbox_pred_fpn' + slvl
],
'rpn_bbox_' + key + '_fpn' + slvl
)
loss_rpn_cls_fpn = model.net.SigmoidCrossEntropyLoss(
['rpn_cls_logits_fpn' + slvl, 'rpn_labels_int32_fpn' + slvl],
'loss_rpn_cls_fpn' + slvl,
normalize=0,
scale=(
model.GetLossScale() / cfg.TRAIN.RPN_BATCH_SIZE_PER_IM /
cfg.TRAIN.IMS_PER_BATCH
)
)
# Normalization by (1) RPN_BATCH_SIZE_PER_IM and (2) IMS_PER_BATCH is
# handled by (1) setting bbox outside weights and (2) SmoothL1Loss
# normalizes by IMS_PER_BATCH
loss_rpn_bbox_fpn = model.net.SmoothL1Loss(
[
'rpn_bbox_pred_fpn' + slvl, 'rpn_bbox_targets_fpn' + slvl,
'rpn_bbox_inside_weights_fpn' + slvl,
'rpn_bbox_outside_weights_fpn' + slvl
],
'loss_rpn_bbox_fpn' + slvl,
beta=1. / 9.,
scale=model.GetLossScale(),
)
loss_gradients.update(
blob_utils_get_loss_gradients(model, [loss_rpn_cls_fpn, loss_rpn_bbox_fpn])
)
model.AddLosses(['loss_rpn_cls_fpn' + slvl, 'loss_rpn_bbox_fpn' + slvl])
return loss_gradients
def const_fill(value):
"""Constant fill helper to reduce verbosity."""
return ('ConstantFill', {'value': value})
def gauss_fill(std):
"""Gaussian fill helper to reduce verbosity."""
return ('GaussianFill', {'std': std})
def FPN_add_fpn_rpn_outputs(model, blobs_in, dim_in, spatial_scales):
"""Add RPN on FPN specific outputs."""
num_anchors = len(cfg.FPN.RPN_ASPECT_RATIOS)
dim_out = dim_in
k_max = cfg.FPN.RPN_MAX_LEVEL # coarsest level of pyramid
k_min = cfg.FPN.RPN_MIN_LEVEL # finest level of pyramid
assert len(blobs_in) == k_max - k_min + 1
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl] # blobs_in is in reversed order
sc = spatial_scales[k_max - lvl] # in reversed order
slvl = str(lvl)
if lvl == k_min:
# Create conv ops with randomly initialized weights and
# zeroed biases for the first FPN level; these will be shared by
# all other FPN levels
# RPN hidden representation
conv_rpn_fpn = model.Conv(
bl_in,
'conv_rpn_fpn' + slvl,
dim_in,
dim_out,
kernel=3,
pad=1,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
model.Relu(conv_rpn_fpn, conv_rpn_fpn)
# Proposal classification scores
rpn_cls_logits_fpn = model.Conv(
conv_rpn_fpn,
'rpn_cls_logits_fpn' + slvl,
dim_in,
num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
# Proposal bbox regression deltas
rpn_bbox_pred_fpn = model.Conv(
conv_rpn_fpn,
'rpn_bbox_pred_fpn' + slvl,
dim_in,
4 * num_anchors,
kernel=1,
pad=0,
stride=1,
weight_init=gauss_fill(0.01),
bias_init=const_fill(0.0)
)
else:
# Share weights and biases
sk_min = str(k_min)
# RPN hidden representation
conv_rpn_fpn = model.ConvShared(
bl_in,
'conv_rpn_fpn' + slvl,
dim_in,
dim_out,
kernel=3,
pad=1,
stride=1,
weight='conv_rpn_fpn' + sk_min + '_w',
bias='conv_rpn_fpn' + sk_min + '_b'
)
model.Relu(conv_rpn_fpn, conv_rpn_fpn)
# Proposal classification scores
rpn_cls_logits_fpn = model.ConvShared(
conv_rpn_fpn,
'rpn_cls_logits_fpn' + slvl,
dim_in,
num_anchors,
kernel=1,
pad=0,
stride=1,
weight='rpn_cls_logits_fpn' + sk_min + '_w',
bias='rpn_cls_logits_fpn' + sk_min + '_b'
)
# Proposal bbox regression deltas
rpn_bbox_pred_fpn = model.ConvShared(
conv_rpn_fpn,
'rpn_bbox_pred_fpn' + slvl,
dim_in,
4 * num_anchors,
kernel=1,
pad=0,
stride=1,
weight='rpn_bbox_pred_fpn' + sk_min + '_w',
bias='rpn_bbox_pred_fpn' + sk_min + '_b'
)
if not model.train or cfg.MODEL.FASTER_RCNN:
# Proposals are needed during:
# 1) inference (== not model.train) for RPN only and Faster R-CNN
# OR
# 2) training for Faster R-CNN
# Otherwise (== training for RPN only), proposals are not needed
lvl_anchors = generate_anchors(
stride=2.**lvl,
sizes=(cfg.FPN.RPN_ANCHOR_START_SIZE * 2.**(lvl - k_min), ),
aspect_ratios=cfg.FPN.RPN_ASPECT_RATIOS
)
rpn_cls_probs_fpn = model.net.Sigmoid(
rpn_cls_logits_fpn, 'rpn_cls_probs_fpn' + slvl
)
model.GenerateProposals(
[rpn_cls_probs_fpn, rpn_bbox_pred_fpn, 'im_info'],
['rpn_rois_fpn' + slvl, 'rpn_roi_probs_fpn' + slvl],
anchors=lvl_anchors,
spatial_scale=sc
)
def rpn_heads_add_generic_rpn_outputs(model, blob_in, dim_in, spatial_scale_in):
"""Add RPN outputs (objectness classification and bounding box regression)
to an RPN model. Abstracts away the use of FPN.
"""
loss_gradients = None
if cfg.FPN.FPN_ON:
# Delegate to the FPN module
FPN_add_fpn_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if cfg.MODEL.FASTER_RCNN:
# CollectAndDistributeFpnRpnProposals also labels proposals when in
# training mode
model.CollectAndDistributeFpnRpnProposals()
if model.train:
loss_gradients = FPN_add_fpn_rpn_losses(model)
else:
# Not using FPN, add RPN to a single scale
add_single_scale_rpn_outputs(model, blob_in, dim_in, spatial_scale_in)
if model.train:
loss_gradients = add_single_scale_rpn_losses(model)
return loss_gradients
def c2_utils_BlobReferenceList(blob_ref_or_list):
"""Ensure that the argument is returned as a list of BlobReferences."""
if isinstance(blob_ref_or_list, core.BlobReference):
return [blob_ref_or_list]
elif type(blob_ref_or_list) in (list, tuple):
for b in blob_ref_or_list:
assert isinstance(b, core.BlobReference)
return blob_ref_or_list
else:
raise TypeError(
'blob_ref_or_list must be a BlobReference or a list/tuple of '
'BlobReferences'
)
def build_generic_detection_model(
model,
add_conv_body_func,
add_roi_box_head_func=None,
add_roi_mask_head_func=None,
add_roi_keypoint_head_func=None,
add_roi_body_uv_head_func=None,
freeze_conv_body=False
):
def _single_gpu_build_func(model):
"""Build the model on a single GPU. Can be called in a loop over GPUs
with name and device scoping to create a data parallel model.
"""
# Add the conv body (called "backbone architecture" in papers)
# E.g., ResNet-50, ResNet-50-FPN, ResNeXt-101-FPN, etc.
blob_conv, dim_conv, spatial_scale_conv = add_conv_body_func(model)
if freeze_conv_body:
for b in c2_utils_BlobReferenceList(blob_conv):
model.StopGradient(b, b)
if not model.train: # == inference
# Create a net that can be used to execute the conv body on an image
# (without also executing RPN or any other network heads)
model.conv_body_net = model.net.Clone('conv_body_net')
head_loss_gradients = {
'rpn': None,
'box': None,
'mask': None,
'keypoints': None,
'body_uv' : None,
}
if cfg.RPN.RPN_ON:
# Add the RPN head
head_loss_gradients['rpn'] = rpn_heads_add_generic_rpn_outputs(
model, blob_conv, dim_conv, spatial_scale_conv
)
if cfg.FPN.FPN_ON:
# After adding the RPN head, restrict FPN blobs and scales to
# those used in the RoI heads
blob_conv, spatial_scale_conv = _narrow_to_fpn_roi_levels(
blob_conv, spatial_scale_conv
)
if not cfg.MODEL.RPN_ONLY:
# Add the Fast R-CNN head
head_loss_gradients['box'] = _add_fast_rcnn_head(
model, add_roi_box_head_func, blob_conv, dim_conv,
spatial_scale_conv
)
if cfg.MODEL.MASK_ON:
# Add the mask head
head_loss_gradients['mask'] = _add_roi_mask_head(
model, add_roi_mask_head_func, blob_conv, dim_conv,
spatial_scale_conv
)
if cfg.MODEL.KEYPOINTS_ON:
# Add the keypoint head
            head_loss_gradients['keypoints'] = _add_roi_keypoint_head(
model, add_roi_keypoint_head_func, blob_conv, dim_conv,
spatial_scale_conv
)
if cfg.MODEL.BODY_UV_ON:
# Add the body UV head
head_loss_gradients['body_uv'] = _add_roi_body_uv_head(
model, add_roi_body_uv_head_func, blob_conv, dim_conv,
spatial_scale_conv
)
if model.train:
loss_gradients = {}
for lg in head_loss_gradients.values():
if lg is not None:
loss_gradients.update(lg)
return loss_gradients
else:
return None
optim_build_data_parallel_model(model, _single_gpu_build_func)
return model
def get_group_gn(dim):
"""
get number of groups used by GroupNorm, based on number of channels
"""
dim_per_gp = cfg.GROUP_NORM.DIM_PER_GP
num_groups = cfg.GROUP_NORM.NUM_GROUPS
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0
group_gn = num_groups
return group_gn
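# Illustrative sketch of the two mutually exclusive GroupNorm options handled
# by get_group_gn above, using hypothetical numbers instead of cfg.GROUP_NORM:
# either the group count is fixed directly, or it is derived from a fixed
# number of channels per group.
def _example_group_gn_choice():
    dim = 256                      # hypothetical channel count
    num_groups = 32                # option 1: fix G -> 32 groups of 8 channels
    groups_from_g = num_groups
    dim_per_gp = 16                # option 2: fix C/G -> 256 // 16 = 16 groups
    groups_from_c_over_g = dim // dim_per_gp
    return groups_from_g, groups_from_c_over_g  # (32, 16)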
def add_topdown_lateral_module(
model, fpn_top, fpn_lateral, fpn_bottom, dim_top, dim_lateral
):
"""Add a top-down lateral module."""
# Lateral 1x1 conv
if cfg.FPN.USE_GN:
# use GroupNorm
lat = model.ConvGN(
fpn_lateral,
fpn_bottom + '_lateral',
dim_in=dim_lateral,
dim_out=dim_top,
group_gn=get_group_gn(dim_top),
kernel=1,
pad=0,
stride=1,
weight_init=(
const_fill(0.0) if cfg.FPN.ZERO_INIT_LATERAL
else ('XavierFill', {})),
bias_init=const_fill(0.0)
)
else:
lat = model.Conv(
fpn_lateral,
fpn_bottom + '_lateral',
dim_in=dim_lateral,
dim_out=dim_top,
kernel=1,
pad=0,
stride=1,
weight_init=(
const_fill(0.0)
if cfg.FPN.ZERO_INIT_LATERAL else ('XavierFill', {})
),
bias_init=const_fill(0.0)
)
# Top-down 2x upsampling
td = model.net.UpsampleNearest(fpn_top, fpn_bottom + '_topdown', scale=2)
# Sum lateral and top-down
model.net.Sum([lat, td], fpn_bottom)
def get_min_max_levels():
"""The min and max FPN levels required for supporting RPN and/or RoI
transform operations on multiple FPN levels.
"""
min_level = LOWEST_BACKBONE_LVL
max_level = HIGHEST_BACKBONE_LVL
if cfg.FPN.MULTILEVEL_RPN and not cfg.FPN.MULTILEVEL_ROIS:
max_level = cfg.FPN.RPN_MAX_LEVEL
min_level = cfg.FPN.RPN_MIN_LEVEL
if not cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS:
max_level = cfg.FPN.ROI_MAX_LEVEL
min_level = cfg.FPN.ROI_MIN_LEVEL
if cfg.FPN.MULTILEVEL_RPN and cfg.FPN.MULTILEVEL_ROIS:
max_level = max(cfg.FPN.RPN_MAX_LEVEL, cfg.FPN.ROI_MAX_LEVEL)
min_level = min(cfg.FPN.RPN_MIN_LEVEL, cfg.FPN.ROI_MIN_LEVEL)
return min_level, max_level
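# Illustrative sketch (hypothetical level values in place of cfg.FPN and the
# backbone constants): the four cases handled by get_min_max_levels above.
def _example_min_max_levels(multilevel_rpn, multilevel_rois):
    backbone_min, backbone_max = 2, 5  # stand-ins for LOWEST/HIGHEST_BACKBONE_LVL
    rpn_min, rpn_max = 2, 6            # stand-ins for cfg.FPN.RPN_{MIN,MAX}_LEVEL
    roi_min, roi_max = 2, 5            # stand-ins for cfg.FPN.ROI_{MIN,MAX}_LEVEL
    if multilevel_rpn and not multilevel_rois:
        return rpn_min, rpn_max                              # (2, 6)
    if multilevel_rois and not multilevel_rpn:
        return roi_min, roi_max                              # (2, 5)
    if multilevel_rpn and multilevel_rois:
        return min(rpn_min, roi_min), max(rpn_max, roi_max)  # (2, 6)
    return backbone_min, backbone_max                        # (2, 5)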
def add_fpn(model, fpn_level_info):
"""Add FPN connections based on the model described in the FPN paper."""
    # FPN levels are built starting from the highest/coarsest level of the
# backbone (usually "conv5"). First we build down, recursively constructing
# lower/finer resolution FPN levels. Then we build up, constructing levels
# that are even higher/coarser than the starting level.
fpn_dim = cfg.FPN.DIM
min_level, max_level = get_min_max_levels()
    # Count the number of backbone stages that we will generate FPN levels for
    # starting from the coarsest backbone stage (usually the "conv5"-like level)
    # E.g., if the backbone level info defines 4 stages: "conv5",
    # "conv4", ... "conv2" and min_level=2, then we end up with 4 - (2 - 2) = 4
    # backbone stages to add FPN to.
num_backbone_stages = (
len(fpn_level_info.blobs) - (min_level - LOWEST_BACKBONE_LVL)
)
lateral_input_blobs = fpn_level_info.blobs[:num_backbone_stages]
output_blobs = [
'fpn_inner_{}'.format(s)
for s in fpn_level_info.blobs[:num_backbone_stages]
]
fpn_dim_lateral = fpn_level_info.dims
xavier_fill = ('XavierFill', {})
# For the coarsest backbone level: 1x1 conv only seeds recursion
if cfg.FPN.USE_GN:
# use GroupNorm
c = model.ConvGN(
lateral_input_blobs[0],
output_blobs[0], # note: this is a prefix
dim_in=fpn_dim_lateral[0],
dim_out=fpn_dim,
group_gn=get_group_gn(fpn_dim),
kernel=1,
pad=0,
stride=1,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
output_blobs[0] = c # rename it
else:
model.Conv(
lateral_input_blobs[0],
output_blobs[0],
dim_in=fpn_dim_lateral[0],
dim_out=fpn_dim,
kernel=1,
pad=0,
stride=1,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
#
# Step 1: recursively build down starting from the coarsest backbone level
#
# For other levels add top-down and lateral connections
for i in range(num_backbone_stages - 1):
add_topdown_lateral_module(
model,
output_blobs[i], # top-down blob
lateral_input_blobs[i + 1], # lateral blob
output_blobs[i + 1], # next output blob
fpn_dim, # output dimension
fpn_dim_lateral[i + 1] # lateral input dimension
)
# Post-hoc scale-specific 3x3 convs
blobs_fpn = []
spatial_scales = []
for i in range(num_backbone_stages):
if cfg.FPN.USE_GN:
# use GroupNorm
fpn_blob = model.ConvGN(
output_blobs[i],
'fpn_{}'.format(fpn_level_info.blobs[i]),
dim_in=fpn_dim,
dim_out=fpn_dim,
group_gn=get_group_gn(fpn_dim),
kernel=3,
pad=1,
stride=1,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
else:
fpn_blob = model.Conv(
output_blobs[i],
'fpn_{}'.format(fpn_level_info.blobs[i]),
dim_in=fpn_dim,
dim_out=fpn_dim,
kernel=3,
pad=1,
stride=1,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
blobs_fpn += [fpn_blob]
spatial_scales += [fpn_level_info.spatial_scales[i]]
#
# Step 2: build up starting from the coarsest backbone level
#
# Check if we need the P6 feature map
if not cfg.FPN.EXTRA_CONV_LEVELS and max_level == HIGHEST_BACKBONE_LVL + 1:
# Original FPN P6 level implementation from our CVPR'17 FPN paper
P6_blob_in = blobs_fpn[0]
P6_name = P6_blob_in + '_subsampled_2x'
# Use max pooling to simulate stride 2 subsampling
P6_blob = model.MaxPool(P6_blob_in, P6_name, kernel=1, pad=0, stride=2)
blobs_fpn.insert(0, P6_blob)
spatial_scales.insert(0, spatial_scales[0] * 0.5)
# Coarser FPN levels introduced for RetinaNet
if cfg.FPN.EXTRA_CONV_LEVELS and max_level > HIGHEST_BACKBONE_LVL:
fpn_blob = fpn_level_info.blobs[0]
dim_in = fpn_level_info.dims[0]
for i in range(HIGHEST_BACKBONE_LVL + 1, max_level + 1):
fpn_blob_in = fpn_blob
if i > HIGHEST_BACKBONE_LVL + 1:
fpn_blob_in = model.Relu(fpn_blob, fpn_blob + '_relu')
fpn_blob = model.Conv(
fpn_blob_in,
'fpn_' + str(i),
dim_in=dim_in,
dim_out=fpn_dim,
kernel=3,
pad=1,
stride=2,
weight_init=xavier_fill,
bias_init=const_fill(0.0)
)
dim_in = fpn_dim
blobs_fpn.insert(0, fpn_blob)
spatial_scales.insert(0, spatial_scales[0] * 0.5)
return blobs_fpn, fpn_dim, spatial_scales
def add_fpn_onto_conv_body(
model, conv_body_func, fpn_level_info_func, P2only=False
):
"""Add the specified conv body to the model and then add FPN levels to it.
"""
    # Note: blobs_conv is in reversed order: [fpn5, fpn4, fpn3, fpn2]
# similarly for dims_conv: [2048, 1024, 512, 256]
# similarly for spatial_scales_fpn: [1/32, 1/16, 1/8, 1/4]
conv_body_func(model)
blobs_fpn, dim_fpn, spatial_scales_fpn = add_fpn(
model, fpn_level_info_func()
)
if P2only:
# use only the finest level
return blobs_fpn[-1], dim_fpn, spatial_scales_fpn[-1]
else:
# use all levels
return blobs_fpn, dim_fpn, spatial_scales_fpn
def fpn_level_info_ResNet101_conv5():
return FpnLevelInfo(
blobs=('res5_2_sum', 'res4_22_sum', 'res3_3_sum', 'res2_2_sum'),
dims=(2048, 1024, 512, 256),
spatial_scales=(1. / 32., 1. / 16., 1. / 8., 1. / 4.)
)
def add_residual_block(
model,
prefix,
blob_in,
dim_in,
dim_out,
dim_inner,
dilation,
stride_init=2,
inplace_sum=False
):
"""Add a residual block to the model."""
# prefix = res<stage>_<sub_stage>, e.g., res2_3
# Max pooling is performed prior to the first stage (which is uniquely
# distinguished by dim_in = 64), thus we keep stride = 1 for the first stage
stride = stride_init if (
dim_in != dim_out and dim_in != 64 and dilation == 1
) else 1
# transformation blob
tr = globals()[cfg.RESNETS.TRANS_FUNC](
model,
blob_in,
dim_in,
dim_out,
stride,
prefix,
dim_inner,
group=cfg.RESNETS.NUM_GROUPS,
dilation=dilation
)
# sum -> ReLU
# shortcut function: by default using bn; support gn
add_shortcut = globals()[cfg.RESNETS.SHORTCUT_FUNC]
sc = add_shortcut(model, prefix, blob_in, dim_in, dim_out, stride)
if inplace_sum:
s = model.net.Sum([tr, sc], tr)
else:
s = model.net.Sum([tr, sc], prefix + '_sum')
return model.Relu(s, s)
def add_stage(
model,
prefix,
blob_in,
n,
dim_in,
dim_out,
dim_inner,
dilation,
stride_init=2
):
"""Add a ResNet stage to the model by stacking n residual blocks."""
# e.g., prefix = res2
for i in range(n):
blob_in = add_residual_block(
model,
'{}_{}'.format(prefix, i),
blob_in,
dim_in,
dim_out,
dim_inner,
dilation,
stride_init,
# Not using inplace for the last block;
# it may be fetched externally or used by FPN
inplace_sum=i < n - 1
)
dim_in = dim_out
return blob_in, dim_in
def add_ResNet_convX_body(model, block_counts, freeze_at=2):
"""Add a ResNet body from input data up through the res5 (aka conv5) stage.
The final res5/conv5 stage may be optionally excluded (hence convX, where
X = 4 or 5)."""
assert freeze_at in [0, 2, 3, 4, 5]
# add the stem (by default, conv1 and pool1 with bn; can support gn)
p, dim_in = globals()[cfg.RESNETS.STEM_FUNC](model, 'data')
dim_bottleneck = cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP
(n1, n2, n3) = block_counts[:3]
s, dim_in = add_stage(model, 'res2', p, n1, dim_in, 256, dim_bottleneck, 1)
if freeze_at == 2:
model.StopGradient(s, s)
s, dim_in = add_stage(
model, 'res3', s, n2, dim_in, 512, dim_bottleneck * 2, 1
)
if freeze_at == 3:
model.StopGradient(s, s)
s, dim_in = add_stage(
model, 'res4', s, n3, dim_in, 1024, dim_bottleneck * 4, 1
)
if freeze_at == 4:
model.StopGradient(s, s)
if len(block_counts) == 4:
n4 = block_counts[3]
s, dim_in = add_stage(
model, 'res5', s, n4, dim_in, 2048, dim_bottleneck * 8,
cfg.RESNETS.RES5_DILATION
)
if freeze_at == 5:
model.StopGradient(s, s)
return s, dim_in, 1. / 32. * cfg.RESNETS.RES5_DILATION
else:
return s, dim_in, 1. / 16.
def ResNet_add_ResNet101_conv5_body(model):
return add_ResNet_convX_body(model, (3, 4, 23, 3))
def FPN_add_fpn_ResNet101_conv5_body(model):
return add_fpn_onto_conv_body(
model, ResNet_add_ResNet101_conv5_body, fpn_level_info_ResNet101_conv5
)
def bottleneck_transformation(
model,
blob_in,
dim_in,
dim_out,
stride,
prefix,
dim_inner,
dilation=1,
group=1
):
"""Add a bottleneck transformation to the model."""
# In original resnet, stride=2 is on 1x1.
# In fb.torch resnet, stride=2 is on 3x3.
(str1x1, str3x3) = (stride, 1) if cfg.RESNETS.STRIDE_1X1 else (1, stride)
# conv 1x1 -> BN -> ReLU
cur = model.ConvAffine(
blob_in,
prefix + '_branch2a',
dim_in,
dim_inner,
kernel=1,
stride=str1x1,
pad=0,
inplace=True
)
cur = model.Relu(cur, cur)
# conv 3x3 -> BN -> ReLU
cur = model.ConvAffine(
cur,
prefix + '_branch2b',
dim_inner,
dim_inner,
kernel=3,
stride=str3x3,
pad=1 * dilation,
dilation=dilation,
group=group,
inplace=True
)
cur = model.Relu(cur, cur)
# conv 1x1 -> BN (no ReLU)
# NB: for now this AffineChannel op cannot be in-place due to a bug in C2
# gradient computation for graphs like this
cur = model.ConvAffine(
cur,
prefix + '_branch2c',
dim_inner,
dim_out,
kernel=1,
stride=1,
pad=0,
inplace=False
)
return cur
def basic_bn_shortcut(model, prefix, blob_in, dim_in, dim_out, stride):
""" For a pre-trained network that used BN. An AffineChannel op replaces BN
during fine-tuning.
"""
if dim_in == dim_out:
return blob_in
c = model.Conv(
blob_in,
prefix + '_branch1',
dim_in,
dim_out,
kernel=1,
stride=stride,
no_bias=1
)
return model.AffineChannel(c, prefix + '_branch1_bn', dim=dim_out)
def basic_bn_stem(model, data, **kwargs):
"""Add a basic ResNet stem. For a pre-trained network that used BN.
An AffineChannel op replaces BN during fine-tuning.
"""
dim = 64
p = model.Conv(data, 'conv1', 3, dim, 7, pad=3, stride=2, no_bias=1)
p = model.AffineChannel(p, 'res_conv1_bn', dim=dim, inplace=True)
p = model.Relu(p, p)
p = model.MaxPool(p, 'pool1', kernel=3, pad=1, stride=2)
return p, dim
def get_func(func_name):
"""Helper to return a function object by name. func_name must identify a
function in this module or the path to a function relative to the base
'modeling' module.
"""
if func_name == '':
return None
try:
parts = func_name.split('.')
# Refers to a function in this module
if len(parts) == 1:
return globals()[parts[0]]
# Otherwise, assume we're referencing a module under modeling
module_name = 'detectron.modeling.' + '.'.join(parts[:-1])
module = importlib.import_module(module_name)
return getattr(module, parts[-1])
except Exception:
logger.error('Failed to find function: {}'.format(func_name))
raise
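# Illustrative sketch of the name resolution performed by get_func above,
# without importing anything: a bare name is looked up in this module's
# globals(), while a dotted name is mapped to a module under
# 'detectron.modeling' plus a trailing attribute. The example string is
# hypothetical.
def _example_get_func_resolution(func_name='FPN.add_fpn_ResNet101_conv5_body'):
    parts = func_name.split('.')
    if len(parts) == 1:
        return ('globals()', parts[0])
    module_name = 'detectron.modeling.' + '.'.join(parts[:-1])
    # -> ('detectron.modeling.FPN', 'add_fpn_ResNet101_conv5_body')
    return (module_name, parts[-1])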
def body_uv_rcnn_heads_add_roi_body_uv_head_v1convX(model, blob_in, dim_in, spatial_scale):
"""v1convX design: X * (conv)."""
hidden_dim = cfg.BODY_UV_RCNN.CONV_HEAD_DIM
kernel_size = cfg.BODY_UV_RCNN.CONV_HEAD_KERNEL
pad_size = kernel_size // 2
current = model.RoIFeatureTransform(
blob_in,
'_[body_uv]_roi_feat',
blob_rois='body_uv_rois',
method=cfg.BODY_UV_RCNN.ROI_XFORM_METHOD,
resolution=cfg.BODY_UV_RCNN.ROI_XFORM_RESOLUTION,
sampling_ratio=cfg.BODY_UV_RCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
for i in range(cfg.BODY_UV_RCNN.NUM_STACKED_CONVS):
current = model.Conv(
current,
'body_conv_fcn' + str(i + 1),
dim_in,
hidden_dim,
kernel_size,
stride=1,
pad=pad_size,
weight_init=(cfg.BODY_UV_RCNN.CONV_INIT, {'std': 0.01}),
bias_init=('ConstantFill', {'value': 0.})
)
current = model.Relu(current, current)
dim_in = hidden_dim
return current, hidden_dim
def generalized_rcnn(model):
"""This model type handles:
- Fast R-CNN
- RPN only (not integrated with Fast R-CNN)
- Faster R-CNN (stagewise training from NIPS paper)
- Faster R-CNN (end-to-end joint training)
- Mask R-CNN (stagewise training from NIPS paper)
- Mask R-CNN (end-to-end joint training)
"""
    return build_generic_detection_model(
        model,
        eval(str(cfg.MODEL.CONV_BODY).replace(".", "_")),
        add_roi_box_head_func=(
            None if cfg.FAST_RCNN.ROI_BOX_HEAD == ""
            else eval(str(cfg.FAST_RCNN.ROI_BOX_HEAD).replace(".", "_"))
        ),
        add_roi_mask_head_func=(
            None if cfg.MRCNN.ROI_MASK_HEAD == ""
            else eval(str(cfg.MRCNN.ROI_MASK_HEAD).replace(".", "_"))
        ),
        add_roi_keypoint_head_func=(
            None if cfg.KRCNN.ROI_KEYPOINTS_HEAD == ""
            else eval(str(cfg.KRCNN.ROI_KEYPOINTS_HEAD).replace(".", "_"))
        ),
        add_roi_body_uv_head_func=(
            None if cfg.BODY_UV_RCNN.ROI_HEAD == ""
            else eval(str(cfg.BODY_UV_RCNN.ROI_HEAD).replace(".", "_"))
        ),
        freeze_conv_body=cfg.TRAIN.FREEZE_CONV_BODY
    )
def fast_rcnn_heads_add_roi_2mlp_head(model, blob_in, dim_in, spatial_scale):
"""Add a ReLU MLP with two hidden layers."""
hidden_dim = cfg.FAST_RCNN.MLP_HEAD_DIM
roi_size = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION
roi_feat = model.RoIFeatureTransform(
blob_in,
'roi_feat',
blob_rois='rois',
method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
resolution=roi_size,
sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
spatial_scale=spatial_scale
)
model.FC(roi_feat, 'fc6', dim_in * roi_size * roi_size, hidden_dim)
model.Relu('fc6', 'fc6')
model.FC('fc6', 'fc7', hidden_dim, hidden_dim)
model.Relu('fc7', 'fc7')
return 'fc7', hidden_dim
def model_builder_create(model_type_func, train=False, gpu_id=0):
"""Generic model creation function that dispatches to specific model
building functions.
By default, this function will generate a data parallel model configured to
run on cfg.NUM_GPUS devices. However, you can restrict it to build a model
targeted to a specific GPU by specifying gpu_id. This is used by
optimizer.build_data_parallel_model() during test time.
"""
model = DetectionModelHelper(
name=model_type_func,
train=train,
num_classes=cfg.MODEL.NUM_CLASSES,
init_params=train
)
model.only_build_forward_pass = False
model.target_gpu_id = gpu_id
return eval(str(model_type_func).replace(".","_"))(model)
def configure_bbox_reg_weights(model, saved_cfg):
"""Compatibility for old models trained with bounding box regression
mean/std normalization (instead of fixed weights).
"""
if 'MODEL' not in saved_cfg or 'BBOX_REG_WEIGHTS' not in saved_cfg.MODEL:
logger.warning('Model from weights file was trained before config key '
'MODEL.BBOX_REG_WEIGHTS was added. Forcing '
'MODEL.BBOX_REG_WEIGHTS = (1., 1., 1., 1.) to ensure '
'correct **inference** behavior.')
# Generally we don't allow modifying the config, but this is a one-off
# hack to support some very old models
is_immutable = cfg.is_immutable()
cfg.immutable(False)
cfg.MODEL.BBOX_REG_WEIGHTS = (1., 1., 1., 1.)
cfg.immutable(is_immutable)
#logger.info('New config:')
#logger.info(pprint.pformat(cfg))
assert not model.train, (
'This model was trained with an older version of the code that '
'used bounding box regression mean/std normalization. It can no '
'longer be used for training. To upgrade it to a trainable model '
'please use fb/compat/convert_bbox_reg_normalized_model.py.'
)
def load_object(file_name):
with open(file_name, 'rb') as f:
        # The default encoding used while unpickling is 7-bit (ASCII). However,
        # the blobs are arbitrary 8-bit bytes which don't agree. The absolutely
        # correct way to do this is to use `encoding="bytes"` and then interpret
        # the blob names either as ASCII, or better, as unicode utf-8. A
        # reasonable fix, however, is to treat the encoding as 8-bit latin1
        # (which agrees with the first 256 characters of Unicode anyway).
if six.PY2:
return pickle.load(f)
else:
return pickle.load(f, encoding='latin1')
def net_utils_initialize_gpu_from_weights_file(model, weights_file, gpu_id=0):
"""Initialize a network with ops on a specific GPU.
If you use CUDA_VISIBLE_DEVICES to target specific GPUs, Caffe2 will
automatically map logical GPU ids (starting from 0) to the physical GPUs
specified in CUDA_VISIBLE_DEVICES.
"""
#logger.info('Loading weights from: {}'.format(weights_file))
ws_blobs = workspace.Blobs()
src_blobs = load_object(weights_file)
if 'cfg' in src_blobs:
saved_cfg = load_cfg(src_blobs['cfg'])
configure_bbox_reg_weights(model, saved_cfg)
if 'blobs' in src_blobs:
# Backwards compat--dictionary used to be only blobs, now they are
# stored under the 'blobs' key
src_blobs = src_blobs['blobs']
# Initialize weights on GPU gpu_id only
unscoped_param_names = OrderedDict() # Print these out in model order
for blob in model.params:
unscoped_param_names[c2_utils_UnscopeName(str(blob))] = True
with c2_utils_NamedCudaScope(gpu_id):
for unscoped_param_name in unscoped_param_names.keys():
if (unscoped_param_name.find(']_') >= 0 and
unscoped_param_name not in src_blobs):
# Special case for sharing initialization from a pretrained
# model:
# If a blob named '_[xyz]_foo' is in model.params and not in
# the initialization blob dictionary, then load source blob
# 'foo' into destination blob '_[xyz]_foo'
src_name = unscoped_param_name[
unscoped_param_name.find(']_') + 2:]
else:
src_name = unscoped_param_name
if src_name not in src_blobs:
#logger.info('{:s} not found'.format(src_name))
continue
dst_name = core.ScopedName(unscoped_param_name)
has_momentum = src_name + '_momentum' in src_blobs
has_momentum_str = ' [+ momentum]' if has_momentum else ''
logger.debug(
'{:s}{:} loaded from weights file into {:s}: {}'.format(
src_name, has_momentum_str, dst_name, src_blobs[src_name]
.shape
)
)
if dst_name in ws_blobs:
# If the blob is already in the workspace, make sure that it
# matches the shape of the loaded blob
ws_blob = workspace.FetchBlob(dst_name)
assert ws_blob.shape == src_blobs[src_name].shape, \
('Workspace blob {} with shape {} does not match '
'weights file shape {}').format(
src_name,
ws_blob.shape,
src_blobs[src_name].shape)
workspace.FeedBlob(
dst_name,
src_blobs[src_name].astype(np.float32, copy=False))
if has_momentum:
workspace.FeedBlob(
dst_name + '_momentum',
src_blobs[src_name + '_momentum'].astype(
np.float32, copy=False))
# We preserve blobs that are in the weights file but not used by the current
# model. We load these into CPU memory under the '__preserve__/' namescope.
# These blobs will be stored when saving a model to a weights file. This
# feature allows for alternating optimization of Faster R-CNN in which blobs
# unused by one step can still be preserved forward and used to initialize
# another step.
for src_name in src_blobs.keys():
if (src_name not in unscoped_param_names and
not src_name.endswith('_momentum') and
src_blobs[src_name] is not None):
with c2_utils_CpuScope():
workspace.FeedBlob(
'__preserve__/{:s}'.format(src_name), src_blobs[src_name])
logger.debug(
'{:s} preserved in workspace (unused)'.format(src_name))
def infer_engine_initialize_model_from_cfg(weights_file, gpu_id=0):
"""Initialize a model from the global cfg. Loads test-time weights and
creates the networks in the Caffe2 workspace.
"""
model = model_builder_create(cfg.MODEL.TYPE, train=False, gpu_id=gpu_id)
net_utils_initialize_gpu_from_weights_file(
model, weights_file, gpu_id=gpu_id,
)
model_builder_add_inference_inputs(model)
workspace.CreateNet(model.net)
workspace.CreateNet(model.conv_body_net)
if cfg.MODEL.MASK_ON:
workspace.CreateNet(model.mask_net)
if cfg.MODEL.KEYPOINTS_ON:
workspace.CreateNet(model.keypoint_net)
if cfg.MODEL.BODY_UV_ON:
workspace.CreateNet(model.body_uv_net)
return model
def setup_logging(name):
FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
# Manually clear root loggers to prevent any module that may have called
# logging.basicConfig() from blocking our logging setup
logging.root.handlers = []
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(name)
return logger
def cache_url(url_or_file, cache_dir):
"""Download the file specified by the URL to the cache_dir and return the
path to the cached file. If the argument is not a URL, simply return it as
is.
"""
is_url = re.match(r'^(?:http)s?://', url_or_file, re.IGNORECASE) is not None
if not is_url:
return url_or_file
#
url = url_or_file
#
    len_filename = len(url.split('/')[-1])
    base_url = url[0:-len_filename - 1]
    #
    cache_file_path = url.replace(base_url, cache_dir)
if os.path.exists(cache_file_path):
#assert_cache_file_is_ok(url, cache_file_path)
return cache_file_path
cache_file_dir = os.path.dirname(cache_file_path)
if not os.path.exists(cache_file_dir):
os.makedirs(cache_file_dir)
#logger.info('Downloading remote file {} to {}'.format(url, cache_file_path))
download_url(url, cache_file_path)
#assert_cache_file_is_ok(url, cache_file_path)
return cache_file_path
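# Illustrative sketch of the cache path computation in cache_url above, with no
# downloading or directory creation; the URL and cache_dir below are made-up
# examples, not real Detectron assets.
def _example_cache_path(url='https://example.org/models/R-101.pkl',
                        cache_dir='/tmp/detectron-cache'):
    len_filename = len(url.split('/')[-1])
    base_url = url[0:-len_filename - 1]
    # Only the trailing filename survives the replace, so the cached copy lands
    # directly under cache_dir: '/tmp/detectron-cache/R-101.pkl'.
    return url.replace(base_url, cache_dir)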
def c2_utils_UnscopeName(possibly_scoped_name):
"""Remove any name scoping from a (possibly) scoped name. For example,
convert the name 'gpu_0/foo' to 'foo'."""
assert isinstance(possibly_scoped_name, string_types)
return possibly_scoped_name[
possibly_scoped_name.rfind(scope._NAMESCOPE_SEPARATOR) + 1:]
def _get_lr_change_ratio(cur_lr, new_lr):
eps = 1e-10
ratio = np.max(
(new_lr / np.max((cur_lr, eps)), cur_lr / np.max((new_lr, eps)))
)
return ratio
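# Illustrative example: the ratio is symmetric, so a 10x warm-up step
# (0.001 -> 0.01) and a 10x decay step (0.01 -> 0.001) both report ~10.
def _example_lr_change_ratio():
    return _get_lr_change_ratio(0.001, 0.01), _get_lr_change_ratio(0.01, 0.001)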
def _filter_boxes(boxes, min_size, im_info):
"""Only keep boxes with both sides >= min_size and center within the image.
"""
# Compute the width and height of the proposal boxes as measured in the original
# image coordinate system (this is required to avoid "Negative Areas Found"
# assertions in other parts of the code that measure).
im_scale = im_info[2]
ws_orig_scale = (boxes[:, 2] - boxes[:, 0]) / im_scale + 1
hs_orig_scale = (boxes[:, 3] - boxes[:, 1]) / im_scale + 1
# To avoid numerical issues we require the min_size to be at least 1 pixel in the
# original image
min_size = np.maximum(min_size, 1)
# Proposal center is computed relative to the scaled input image
ws = boxes[:, 2] - boxes[:, 0] + 1
hs = boxes[:, 3] - boxes[:, 1] + 1
x_ctr = boxes[:, 0] + ws / 2.
y_ctr = boxes[:, 1] + hs / 2.
keep = np.where(
(ws_orig_scale >= min_size)
& (hs_orig_scale >= min_size)
& (x_ctr < im_info[1])
& (y_ctr < im_info[0])
)[0]
return keep
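# Illustrative example with hypothetical numbers: for a 2x-scaled 200x300 input
# (im_info = [height, width, scale]), a proposal must span at least min_size
# pixels per side in the *original* image and keep its center inside the scaled
# image to survive _filter_boxes.
def _example_filter_boxes():
    import numpy as np
    im_info = np.array([200., 300., 2.0])
    boxes = np.array([
        [0., 0., 9., 9.],          # 5.5 x 5.5 px in the original image -> kept
        [0., 0., 3., 3.],          # 2.5 x 2.5 px in the original image -> dropped
        [290., 190., 310., 210.],  # center (300.5, 200.5) is outside -> dropped
    ])
    return _filter_boxes(boxes, min_size=4, im_info=im_info)  # -> array([0])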
def box_utils_clip_tiled_boxes(boxes, im_shape):
"""Clip boxes to image boundaries. im_shape is [height, width] and boxes
has shape (N, 4 * num_tiled_boxes)."""
assert boxes.shape[1] % 4 == 0, \
'boxes.shape[1] is {:d}, but must be divisible by 4.'.format(
boxes.shape[1]
)
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
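# Illustrative example: coordinates are clamped into [0, W-1] x [0, H-1], so
# with a 100x200 (height x width) image the box below becomes [0, 0, 199, 99].
def _example_clip_tiled_boxes():
    import numpy as np
    boxes = np.array([[-5., -10., 250., 120.]])
    return box_utils_clip_tiled_boxes(boxes, im_shape=(100, 200))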
def box_utils_nms(dets, thresh):
"""Apply classic DPM-style greedy NMS."""
if dets.shape[0] == 0:
return []
return cython_nms.nms(dets, thresh)
def fast_rcnn_roi_data_get_fast_rcnn_blob_names(is_training=True):
"""Fast R-CNN blob names."""
# rois blob: holds R regions of interest, each is a 5-tuple
# (batch_idx, x1, y1, x2, y2) specifying an image batch index and a
# rectangle (x1, y1, x2, y2)
blob_names = ['rois']
if is_training:
# labels_int32 blob: R categorical labels in [0, ..., K] for K
# foreground classes plus background
blob_names += ['labels_int32']
if is_training:
# bbox_targets blob: R bounding-box regression targets with 4
# targets per class
blob_names += ['bbox_targets']
        # bbox_inside_weights blob: At most 4 targets per roi are active;
        # this binary vector specifies the subset of active targets
blob_names += ['bbox_inside_weights']
blob_names += ['bbox_outside_weights']
if is_training and cfg.MODEL.MASK_ON:
# 'mask_rois': RoIs sampled for training the mask prediction branch.
# Shape is (#masks, 5) in format (batch_idx, x1, y1, x2, y2).
blob_names += ['mask_rois']
# 'roi_has_mask': binary labels for the RoIs specified in 'rois'
# indicating if each RoI has a mask or not. Note that in some cases
# a *bg* RoI will have an all -1 (ignore) mask associated with it in
# the case that no fg RoIs can be sampled. Shape is (batchsize).
blob_names += ['roi_has_mask_int32']
# 'masks_int32' holds binary masks for the RoIs specified in
# 'mask_rois'. Shape is (#fg, M * M) where M is the ground truth
# mask size.
blob_names += ['masks_int32']
if is_training and cfg.MODEL.KEYPOINTS_ON:
# 'keypoint_rois': RoIs sampled for training the keypoint prediction
# branch. Shape is (#instances, 5) in format (batch_idx, x1, y1, x2,
# y2).
blob_names += ['keypoint_rois']
# 'keypoint_locations_int32': index of keypoint in
# KRCNN.HEATMAP_SIZE**2 sized array. Shape is (#instances). Used in
# SoftmaxWithLoss.
blob_names += ['keypoint_locations_int32']
# 'keypoint_weights': weight assigned to each target in
# 'keypoint_locations_int32'. Shape is (#instances). Used in
# SoftmaxWithLoss.
blob_names += ['keypoint_weights']
# 'keypoint_loss_normalizer': optional normalization factor to use if
# cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is False.
blob_names += ['keypoint_loss_normalizer']
########################
if is_training and cfg.MODEL.BODY_UV_ON:
blob_names += ['body_uv_rois']
blob_names += ['roi_has_body_uv_int32']
#########
# ###################################################
blob_names += ['body_uv_ann_labels']
blob_names += ['body_uv_ann_weights']
# #################################################
blob_names += ['body_uv_X_points']
blob_names += ['body_uv_Y_points']
blob_names += ['body_uv_Ind_points']
blob_names += ['body_uv_I_points']
blob_names += ['body_uv_U_points']
blob_names += ['body_uv_V_points']
blob_names += ['body_uv_point_weights']
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
# Support for FPN multi-level rois without bbox reg isn't
# implemented (... and may never be implemented)
k_max = cfg.FPN.ROI_MAX_LEVEL
k_min = cfg.FPN.ROI_MIN_LEVEL
# Same format as rois blob, but one per FPN level
for lvl in range(k_min, k_max + 1):
blob_names += ['rois_fpn' + str(lvl)]
blob_names += ['rois_idx_restore_int32']
if is_training:
if cfg.MODEL.MASK_ON:
for lvl in range(k_min, k_max + 1):
blob_names += ['mask_rois_fpn' + str(lvl)]
blob_names += ['mask_rois_idx_restore_int32']
if cfg.MODEL.KEYPOINTS_ON:
for lvl in range(k_min, k_max + 1):
blob_names += ['keypoint_rois_fpn' + str(lvl)]
blob_names += ['keypoint_rois_idx_restore_int32']
if cfg.MODEL.BODY_UV_ON:
for lvl in range(k_min, k_max + 1):
blob_names += ['body_uv_rois_fpn' + str(lvl)]
blob_names += ['body_uv_rois_idx_restore_int32']
return blob_names
def blob_utils_py_op_copy_blob(blob_in, blob_out):
"""Copy a numpy ndarray given as blob_in into the Caffe2 CPUTensor blob
given as blob_out. Supports float32 and int32 blob data types. This function
is intended for copying numpy data into a Caffe2 blob in PythonOps.
"""
# Some awkward voodoo required by Caffe2 to support int32 blobs
needs_int32_init = False
try:
        _ = blob_out.data.dtype  # noqa
except Exception:
needs_int32_init = blob_in.dtype == np.int32
if needs_int32_init:
# init can only take a list (failed on tuple)
blob_out.init(list(blob_in.shape), caffe2_pb2.TensorProto.INT32)
else:
blob_out.reshape(blob_in.shape)
blob_out.data[...] = blob_in
def keypoint_rcnn_roi_data_finalize_keypoint_minibatch(blobs, valid):
"""Finalize the minibatch after blobs for all minibatch images have been
collated.
"""
min_count = cfg.KRCNN.MIN_KEYPOINT_COUNT_FOR_VALID_MINIBATCH
num_visible_keypoints = np.sum(blobs['keypoint_weights'])
valid = (
valid and len(blobs['keypoint_weights']) > 0 and
num_visible_keypoints > min_count
)
# Normalizer to use if cfg.KRCNN.NORMALIZE_BY_VISIBLE_KEYPOINTS is False.
# See modeling.model_builder.add_keypoint_losses
norm = num_visible_keypoints / (
cfg.TRAIN.IMS_PER_BATCH * cfg.TRAIN.BATCH_SIZE_PER_IM *
cfg.TRAIN.FG_FRACTION * cfg.KRCNN.NUM_KEYPOINTS
)
blobs['keypoint_loss_normalizer'] = np.array(norm, dtype=np.float32)
return valid
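# Worked example of the normalizer above, with hypothetical config values
# (2 images per batch, 512 RoIs per image, 0.25 fg fraction, 17 COCO
# keypoints): the denominator is 2 * 512 * 0.25 * 17 = 4352, so a minibatch
# with 100 visible keypoints gets a normalizer of ~0.023 instead of having its
# loss averaged over only the 100 visible targets.
def _example_keypoint_loss_normalizer(num_visible_keypoints=100):
    ims_per_batch, rois_per_im, fg_fraction, num_keypoints = 2, 512, 0.25, 17
    return num_visible_keypoints / (
        ims_per_batch * rois_per_im * fg_fraction * num_keypoints
    )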
def fpn_add_multilevel_roi_blobs(
blobs, blob_prefix, rois, target_lvls, lvl_min, lvl_max
):
"""Add RoI blobs for multiple FPN levels to the blobs dict.
blobs: a dict mapping from blob name to numpy ndarray
blob_prefix: name prefix to use for the FPN blobs
rois: the source rois as a 2D numpy array of shape (N, 5) where each row is
an roi and the columns encode (batch_idx, x1, y1, x2, y2)
target_lvls: numpy array of shape (N, ) indicating which FPN level each roi
in rois should be assigned to
lvl_min: the finest (highest resolution) FPN level (e.g., 2)
    lvl_max: the coarsest (lowest resolution) FPN level (e.g., 6)
"""
rois_idx_order = np.empty((0, ))
rois_stacked = np.zeros((0, 5), dtype=np.float32) # for assert
for lvl in range(lvl_min, lvl_max + 1):
idx_lvl = np.where(target_lvls == lvl)[0]
blobs[blob_prefix + '_fpn' + str(lvl)] = rois[idx_lvl, :]
rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
rois_stacked = np.vstack(
[rois_stacked, blobs[blob_prefix + '_fpn' + str(lvl)]]
)
rois_idx_restore = np.argsort(rois_idx_order).astype(np.int32, copy=False)
blobs[blob_prefix + '_idx_restore_int32'] = rois_idx_restore
# Sanity check that restore order is correct
assert (rois_stacked[rois_idx_restore] == rois).all()
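# Illustrative example (not used by the data loader): three RoIs assigned to
# hypothetical FPN levels 3, 2, 3 are split into per-level blobs plus an
# _idx_restore_int32 permutation that puts the concatenated per-level RoIs back
# into their original order.
def _example_add_multilevel_roi_blobs():
    import numpy as np
    blobs = {}
    rois = np.array([
        [0., 10., 10., 50., 50.],
        [0., 0., 0., 200., 200.],
        [0., 30., 30., 80., 80.],
    ], dtype=np.float32)
    target_lvls = np.array([3, 2, 3])
    fpn_add_multilevel_roi_blobs(blobs, 'rois', rois, target_lvls, 2, 3)
    # blobs now holds 'rois_fpn2' (1 RoI), 'rois_fpn3' (2 RoIs) and
    # 'rois_idx_restore_int32' == [1, 0, 2].
    return blobs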
def _add_multilevel_rois(blobs):
"""By default training RoIs are added for a single feature map level only.
When using FPN, the RoIs must be distributed over different FPN levels
according the level assignment heuristic (see: modeling.FPN.
map_rois_to_fpn_levels).
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
def _distribute_rois_over_fpn_levels(rois_blob_name):
"""Distribute rois over the different FPN levels."""
# Get target level for each roi
# Recall blob rois are in (batch_idx, x1, y1, x2, y2) format, hence take
# the box coordinates from columns 1:5
target_lvls = fpn_map_rois_to_fpn_levels(
blobs[rois_blob_name][:, 1:5], lvl_min, lvl_max
)
# Add per FPN level roi blobs named like: <rois_blob_name>_fpn<lvl>
fpn_add_multilevel_roi_blobs(
blobs, rois_blob_name, blobs[rois_blob_name], target_lvls, lvl_min,
lvl_max
)
_distribute_rois_over_fpn_levels('rois')
if cfg.MODEL.MASK_ON:
_distribute_rois_over_fpn_levels('mask_rois')
if cfg.MODEL.KEYPOINTS_ON:
_distribute_rois_over_fpn_levels('keypoint_rois')
if cfg.MODEL.BODY_UV_ON:
_distribute_rois_over_fpn_levels('body_uv_rois')
def segm_utils_GetDensePoseMask(Polys):
MaskGen = np.zeros([256,256])
for i in range(1,15):
if(Polys[i-1]):
current_mask = mask_util.decode(Polys[i-1])
MaskGen[current_mask>0] = i
return MaskGen
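# A minimal usage sketch for segm_utils_GetDensePoseMask, with a made-up
# dp_masks-style list: 14 per-part entries (empty ones allowed), each a COCO
# RLE; here part 1 covers the top-left quadrant, so the returned 256 x 256
# index mask is 1 there and 0 elsewhere.
def _example_get_densepose_mask():
    part1 = np.zeros((256, 256), dtype=np.uint8)
    part1[:128, :128] = 1
    polys = [mask_util.encode(np.asfortranarray(part1))] + [[]] * 13
    return segm_utils_GetDensePoseMask(polys)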
def body_uv_rcnn_roi_data_add_body_uv_rcnn_blobs(blobs, sampled_boxes, roidb, im_scale, batch_idx):
IsFlipped = roidb['flipped']
M = cfg.BODY_UV_RCNN.HEATMAP_SIZE
#
polys_gt_inds = np.where(roidb['ignore_UV_body'] == 0)[0]
boxes_from_polys = [roidb['boxes'][i,:] for i in polys_gt_inds]
    if boxes_from_polys:
        boxes_from_polys = np.vstack(boxes_from_polys)
    boxes_from_polys = np.array(boxes_from_polys)
    fg_inds = np.where(blobs['labels_int32'] > 0)[0]
    roi_has_mask = np.zeros(blobs['labels_int32'].shape)
    if boxes_from_polys.any() and fg_inds.shape[0] > 0:
rois_fg = sampled_boxes[fg_inds]
#
rois_fg.astype(np.float32, copy=False)
boxes_from_polys.astype(np.float32, copy=False)
#
overlaps_bbfg_bbpolys = box_utils_bbox_overlaps(
rois_fg.astype(np.float32, copy=False),
boxes_from_polys.astype(np.float32, copy=False))
fg_polys_value = np.max(overlaps_bbfg_bbpolys, axis=1)
fg_inds = fg_inds[fg_polys_value>0.7]
    if boxes_from_polys.any() and fg_inds.shape[0] > 0:
for jj in fg_inds:
roi_has_mask[jj] = 1
# Create blobs for densepose supervision.
################################################## The mask
All_labels = blob_utils_zeros((fg_inds.shape[0], M ** 2), int32=True)
All_Weights = blob_utils_zeros((fg_inds.shape[0], M ** 2), int32=True)
################################################# The points
X_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
Y_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
Ind_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=True)
I_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=True)
U_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
V_points = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
Uv_point_weights = blob_utils_zeros((fg_inds.shape[0], 196), int32=False)
#################################################
rois_fg = sampled_boxes[fg_inds]
overlaps_bbfg_bbpolys = box_utils_bbox_overlaps(
rois_fg.astype(np.float32, copy=False),
boxes_from_polys.astype(np.float32, copy=False))
fg_polys_inds = np.argmax(overlaps_bbfg_bbpolys, axis=1)
for i in range(rois_fg.shape[0]):
#
fg_polys_ind = polys_gt_inds[ fg_polys_inds[i] ]
#
Ilabel = segm_utils_GetDensePoseMask( roidb['dp_masks'][ fg_polys_ind ] )
#
GT_I = np.array(roidb['dp_I'][ fg_polys_ind ])
GT_U = np.array(roidb['dp_U'][ fg_polys_ind ])
GT_V = np.array(roidb['dp_V'][ fg_polys_ind ])
GT_x = np.array(roidb['dp_x'][ fg_polys_ind ])
GT_y = np.array(roidb['dp_y'][ fg_polys_ind ])
GT_weights = np.ones(GT_I.shape).astype(np.float32)
#
## Do the flipping of the densepose annotation !
if(IsFlipped):
GT_I,GT_U,GT_V,GT_x,GT_y,Ilabel = DP.get_symmetric_densepose(GT_I,GT_U,GT_V,GT_x,GT_y,Ilabel)
#
roi_fg = rois_fg[i]
roi_gt = boxes_from_polys[fg_polys_inds[i],:]
#
x1 = roi_fg[0] ; x2 = roi_fg[2]
y1 = roi_fg[1] ; y2 = roi_fg[3]
#
x1_source = roi_gt[0]; x2_source = roi_gt[2]
y1_source = roi_gt[1]; y2_source = roi_gt[3]
#
x_targets = ( np.arange(x1,x2, (x2 - x1)/M ) - x1_source ) * ( 256. / (x2_source-x1_source) )
y_targets = ( np.arange(y1,y2, (y2 - y1)/M ) - y1_source ) * ( 256. / (y2_source-y1_source) )
#
x_targets = x_targets[0:M] ## Strangely sometimes it can be M+1, so make sure size is OK!
y_targets = y_targets[0:M]
#
[X_targets,Y_targets] = np.meshgrid( x_targets, y_targets )
New_Index = cv2.remap(Ilabel,X_targets.astype(np.float32), Y_targets.astype(np.float32), interpolation=cv2.INTER_NEAREST, borderMode= cv2.BORDER_CONSTANT, borderValue=(0))
#
All_L = np.zeros(New_Index.shape)
All_W = np.ones(New_Index.shape)
#
All_L = New_Index
#
gt_length_x = x2_source - x1_source
gt_length_y = y2_source - y1_source
#
GT_y = (( GT_y / 256. * gt_length_y ) + y1_source - y1 ) * ( M / ( y2 - y1 ) )
GT_x = (( GT_x / 256. * gt_length_x ) + x1_source - x1 ) * ( M / ( x2 - x1 ) )
#
GT_I[GT_y<0] = 0
GT_I[GT_y>(M-1)] = 0
GT_I[GT_x<0] = 0
GT_I[GT_x>(M-1)] = 0
#
points_inside = GT_I>0
GT_U = GT_U[points_inside]
GT_V = GT_V[points_inside]
GT_x = GT_x[points_inside]
GT_y = GT_y[points_inside]
GT_weights = GT_weights[points_inside]
GT_I = GT_I[points_inside]
#
X_points[i, 0:len(GT_x)] = GT_x
Y_points[i, 0:len(GT_y)] = GT_y
Ind_points[i, 0:len(GT_I)] = i
I_points[i, 0:len(GT_I)] = GT_I
U_points[i, 0:len(GT_U)] = GT_U
V_points[i, 0:len(GT_V)] = GT_V
Uv_point_weights[i, 0:len(GT_weights)] = GT_weights
#
All_labels[i, :] = np.reshape(All_L.astype(np.int32), M ** 2)
All_Weights[i, :] = np.reshape(All_W.astype(np.int32), M ** 2)
##
else:
bg_inds = np.where(blobs['labels_int32'] == 0)[0]
#
if(len(bg_inds)==0):
rois_fg = sampled_boxes[0].reshape((1, -1))
else:
rois_fg = sampled_boxes[bg_inds[0]].reshape((1, -1))
roi_has_mask[0] = 1
#
X_points = blob_utils_zeros((1, 196), int32=False)
Y_points = blob_utils_zeros((1, 196), int32=False)
Ind_points = blob_utils_zeros((1, 196), int32=True)
I_points = blob_utils_zeros((1,196), int32=True)
U_points = blob_utils_zeros((1, 196), int32=False)
V_points = blob_utils_zeros((1, 196), int32=False)
Uv_point_weights = blob_utils_zeros((1, 196), int32=False)
#
        All_labels = blob_utils_zeros((1, M ** 2), int32=True)
        All_Weights = blob_utils_zeros((1, M ** 2), int32=True)
#
rois_fg *= im_scale
repeated_batch_idx = batch_idx * blob_utils_ones((rois_fg.shape[0], 1))
rois_fg = np.hstack((repeated_batch_idx, rois_fg))
#
K = cfg.BODY_UV_RCNN.NUM_PATCHES
#
U_points = np.tile( U_points , [1,K+1] )
V_points = np.tile( V_points , [1,K+1] )
Uv_Weight_Points = np.zeros(U_points.shape)
#
for jjj in range(1,K+1):
Uv_Weight_Points[ : , jjj * I_points.shape[1] : (jjj+1) * I_points.shape[1] ] = ( I_points == jjj ).astype(np.float32)
#
################
# Update blobs dict with Mask R-CNN blobs
###############
#
blobs['body_uv_rois'] = np.array(rois_fg)
blobs['roi_has_body_uv_int32'] = np.array(roi_has_mask).astype(np.int32)
##
blobs['body_uv_ann_labels'] = np.array(All_labels).astype(np.int32)
blobs['body_uv_ann_weights'] = np.array(All_Weights).astype(np.float32)
#
##########################
blobs['body_uv_X_points'] = X_points.astype(np.float32)
blobs['body_uv_Y_points'] = Y_points.astype(np.float32)
blobs['body_uv_Ind_points'] = Ind_points.astype(np.float32)
blobs['body_uv_I_points'] = I_points.astype(np.float32)
blobs['body_uv_U_points'] = U_points.astype(np.float32) #### VERY IMPORTANT : These are switched here :
blobs['body_uv_V_points'] = V_points.astype(np.float32)
blobs['body_uv_point_weights'] = Uv_Weight_Points.astype(np.float32)
###################
def keypoint_utils_keypoints_to_heatmap_labels(keypoints, rois):
"""Encode keypoint location in the target heatmap for use in
SoftmaxWithLoss.
"""
# Maps keypoints from the half-open interval [x1, x2) on continuous image
# coordinates to the closed interval [0, HEATMAP_SIZE - 1] on discrete image
# coordinates. We use the continuous <-> discrete conversion from Heckbert
# 1990 ("What is the coordinate of a pixel?"): d = floor(c) and c = d + 0.5,
# where d is a discrete coordinate and c is a continuous coordinate.
assert keypoints.shape[2] == cfg.KRCNN.NUM_KEYPOINTS
shape = (len(rois), cfg.KRCNN.NUM_KEYPOINTS)
heatmaps = blob_utils_zeros(shape)
weights = blob_utils_zeros(shape)
offset_x = rois[:, 0]
offset_y = rois[:, 1]
scale_x = cfg.KRCNN.HEATMAP_SIZE / (rois[:, 2] - rois[:, 0])
scale_y = cfg.KRCNN.HEATMAP_SIZE / (rois[:, 3] - rois[:, 1])
for kp in range(keypoints.shape[2]):
vis = keypoints[:, 2, kp] > 0
x = keypoints[:, 0, kp].astype(np.float32)
y = keypoints[:, 1, kp].astype(np.float32)
# Since we use floor below, if a keypoint is exactly on the roi's right
# or bottom boundary, we shift it in by eps (conceptually) to keep it in
# the ground truth heatmap.
x_boundary_inds = np.where(x == rois[:, 2])[0]
y_boundary_inds = np.where(y == rois[:, 3])[0]
x = (x - offset_x) * scale_x
x = np.floor(x)
if len(x_boundary_inds) > 0:
x[x_boundary_inds] = cfg.KRCNN.HEATMAP_SIZE - 1
y = (y - offset_y) * scale_y
y = np.floor(y)
if len(y_boundary_inds) > 0:
y[y_boundary_inds] = cfg.KRCNN.HEATMAP_SIZE - 1
valid_loc = np.logical_and(
np.logical_and(x >= 0, y >= 0),
np.logical_and(
x < cfg.KRCNN.HEATMAP_SIZE, y < cfg.KRCNN.HEATMAP_SIZE))
valid = np.logical_and(valid_loc, vis)
valid = valid.astype(np.int32)
lin_ind = y * cfg.KRCNN.HEATMAP_SIZE + x
heatmaps[:, kp] = lin_ind * valid
weights[:, kp] = valid
return heatmaps, weights
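# A minimal sketch of the heatmap label encoding above, using a hypothetical
# heatmap size of 56 in place of cfg.KRCNN.HEATMAP_SIZE: a visible keypoint at
# image coordinates (x, y) inside an roi (x1, y1, x2, y2) is scaled into the
# heatmap, floored to a discrete cell, and flattened to the linear index
# y_hm * heatmap_size + x_hm that SoftmaxWithLoss consumes.
def _example_keypoint_to_heatmap_label(x=120., y=80.,
                                       roi=(100., 50., 200., 150.),
                                       heatmap_size=56):
    x1, y1, x2, y2 = roi
    x_hm = np.floor((x - x1) * heatmap_size / (x2 - x1))
    y_hm = np.floor((y - y1) * heatmap_size / (y2 - y1))
    return int(y_hm * heatmap_size + x_hm)  # here: 16 * 56 + 11 = 907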
def _within_box(points, boxes):
"""Validate which keypoints are contained inside a given box.
points: Nx2xK
boxes: Nx4
output: NxK
"""
x_within = np.logical_and(
points[:, 0, :] >= np.expand_dims(boxes[:, 0], axis=1),
points[:, 0, :] <= np.expand_dims(boxes[:, 2], axis=1)
)
y_within = np.logical_and(
points[:, 1, :] >= np.expand_dims(boxes[:, 1], axis=1),
points[:, 1, :] <= np.expand_dims(boxes[:, 3], axis=1)
)
return np.logical_and(x_within, y_within)
def keypoint_rcnn_roi_data_add_keypoint_rcnn_blobs(
blobs, roidb, fg_rois_per_image, fg_inds, im_scale, batch_idx
):
"""Add Mask R-CNN keypoint specific blobs to the given blobs dictionary."""
# Note: gt_inds must match how they're computed in
# datasets.json_dataset._merge_proposal_boxes_into_roidb
gt_inds = np.where(roidb['gt_classes'] > 0)[0]
max_overlaps = roidb['max_overlaps']
gt_keypoints = roidb['gt_keypoints']
ind_kp = gt_inds[roidb['box_to_gt_ind_map']]
within_box = _within_box(gt_keypoints[ind_kp, :, :], roidb['boxes'])
vis_kp = gt_keypoints[ind_kp, 2, :] > 0
is_visible = np.sum(np.logical_and(vis_kp, within_box), axis=1) > 0
kp_fg_inds = np.where(
np.logical_and(max_overlaps >= cfg.TRAIN.FG_THRESH, is_visible)
)[0]
kp_fg_rois_per_this_image = np.minimum(fg_rois_per_image, kp_fg_inds.size)
if kp_fg_inds.size > kp_fg_rois_per_this_image:
kp_fg_inds = np.random.choice(
kp_fg_inds, size=kp_fg_rois_per_this_image, replace=False
)
sampled_fg_rois = roidb['boxes'][kp_fg_inds]
box_to_gt_ind_map = roidb['box_to_gt_ind_map'][kp_fg_inds]
num_keypoints = gt_keypoints.shape[2]
sampled_keypoints = -np.ones(
(len(sampled_fg_rois), gt_keypoints.shape[1], num_keypoints),
dtype=gt_keypoints.dtype
)
for ii in range(len(sampled_fg_rois)):
ind = box_to_gt_ind_map[ii]
if ind >= 0:
sampled_keypoints[ii, :, :] = gt_keypoints[gt_inds[ind], :, :]
assert np.sum(sampled_keypoints[ii, 2, :]) > 0
heats, weights = keypoint_utils_keypoints_to_heatmap_labels(
sampled_keypoints, sampled_fg_rois
)
shape = (sampled_fg_rois.shape[0] * cfg.KRCNN.NUM_KEYPOINTS, 1)
heats = heats.reshape(shape)
weights = weights.reshape(shape)
sampled_fg_rois *= im_scale
repeated_batch_idx = batch_idx * blob_utils_ones(
(sampled_fg_rois.shape[0], 1)
)
sampled_fg_rois = np.hstack((repeated_batch_idx, sampled_fg_rois))
blobs['keypoint_rois'] = sampled_fg_rois
blobs['keypoint_locations_int32'] = heats.astype(np.int32, copy=False)
blobs['keypoint_weights'] = weights
def _expand_to_class_specific_mask_targets(masks, mask_class_labels):
"""Expand masks from shape (#masks, M ** 2) to (#masks, #classes * M ** 2)
to encode class specific mask targets.
"""
assert masks.shape[0] == mask_class_labels.shape[0]
M = cfg.MRCNN.RESOLUTION
# Target values of -1 are "don't care" / ignore labels
mask_targets = -blob_utils_ones(
(masks.shape[0], cfg.MODEL.NUM_CLASSES * M**2), int32=True
)
for i in range(masks.shape[0]):
cls = int(mask_class_labels[i])
start = M**2 * cls
end = start + M**2
# Ignore background instance
# (only happens when there is no fg samples in an image)
if cls > 0:
mask_targets[i, start:end] = masks[i, :]
return mask_targets
def segm_utils_polys_to_mask_wrt_box(polygons, box, M):
"""Convert from the COCO polygon segmentation format to a binary mask
encoded as a 2D array of data type numpy.float32. The polygon segmentation
is understood to be enclosed in the given box and rasterized to an M x M
mask. The resulting mask is therefore of shape (M, M).
"""
w = box[2] - box[0]
h = box[3] - box[1]
w = np.maximum(w, 1)
h = np.maximum(h, 1)
polygons_norm = []
for poly in polygons:
p = np.array(poly, dtype=np.float32)
p[0::2] = (p[0::2] - box[0]) * M / w
p[1::2] = (p[1::2] - box[1]) * M / h
polygons_norm.append(p)
rle = mask_util.frPyObjects(polygons_norm, M, M)
mask = np.array(mask_util.decode(rle), dtype=np.float32)
# Flatten in case polygons was a list
mask = np.sum(mask, axis=2)
mask = np.array(mask > 0, dtype=np.float32)
return mask
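# A minimal usage sketch for segm_utils_polys_to_mask_wrt_box (polygon and box
# are made up): a COCO-style polygon covering the left half of a 100 x 100 box,
# rasterized at M = 28, yields a 28 x 28 mask that is 1 on (roughly) the left
# 14 columns and 0 elsewhere.
def _example_polys_to_mask_wrt_box():
    box = np.array([0., 0., 100., 100.])
    polygons = [[0., 0., 50., 0., 50., 100., 0., 100.]]  # flat [x0, y0, x1, y1, ...]
    return segm_utils_polys_to_mask_wrt_box(polygons, box, 28)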
def segm_utils_polys_to_boxes(polys):
"""Convert a list of polygons into an array of tight bounding boxes."""
boxes_from_polys = np.zeros((len(polys), 4), dtype=np.float32)
for i in range(len(polys)):
poly = polys[i]
x0 = min(min(p[::2]) for p in poly)
x1 = max(max(p[::2]) for p in poly)
y0 = min(min(p[1::2]) for p in poly)
y1 = max(max(p[1::2]) for p in poly)
boxes_from_polys[i, :] = [x0, y0, x1, y1]
return boxes_from_polys
def mask_rcnn_roi_data_add_mask_rcnn_blobs(blobs, sampled_boxes, roidb, im_scale, batch_idx):
"""Add Mask R-CNN specific blobs to the input blob dictionary."""
# Prepare the mask targets by associating one gt mask to each training roi
# that has a fg (non-bg) class label.
M = cfg.MRCNN.RESOLUTION
polys_gt_inds = np.where(
(roidb['gt_classes'] > 0) & (roidb['is_crowd'] == 0)
)[0]
polys_gt = [roidb['segms'][i] for i in polys_gt_inds]
boxes_from_polys = segm_utils_polys_to_boxes(polys_gt)
fg_inds = np.where(blobs['labels_int32'] > 0)[0]
roi_has_mask = blobs['labels_int32'].copy()
roi_has_mask[roi_has_mask > 0] = 1
if fg_inds.shape[0] > 0:
# Class labels for the foreground rois
mask_class_labels = blobs['labels_int32'][fg_inds]
masks = blob_utils_zeros((fg_inds.shape[0], M**2), int32=True)
# Find overlap between all foreground rois and the bounding boxes
# enclosing each segmentation
rois_fg = sampled_boxes[fg_inds]
overlaps_bbfg_bbpolys = box_utils_bbox_overlaps(
rois_fg.astype(np.float32, copy=False),
boxes_from_polys.astype(np.float32, copy=False)
)
# Map from each fg rois to the index of the mask with highest overlap
# (measured by bbox overlap)
fg_polys_inds = np.argmax(overlaps_bbfg_bbpolys, axis=1)
# add fg targets
for i in range(rois_fg.shape[0]):
fg_polys_ind = fg_polys_inds[i]
poly_gt = polys_gt[fg_polys_ind]
roi_fg = rois_fg[i]
# Rasterize the portion of the polygon mask within the given fg roi
# to an M x M binary image
mask = segm_utils_polys_to_mask_wrt_box(poly_gt, roi_fg, M)
mask = np.array(mask > 0, dtype=np.int32) # Ensure it's binary
masks[i, :] = np.reshape(mask, M**2)
else: # If there are no fg masks (it does happen)
# The network cannot handle empty blobs, so we must provide a mask
        # We simply take the first bg roi, give it an all -1's mask (ignore
        # label), and label it with class zero (bg).
bg_inds = np.where(blobs['labels_int32'] == 0)[0]
# rois_fg is actually one background roi, but that's ok because ...
rois_fg = sampled_boxes[bg_inds[0]].reshape((1, -1))
        # We give it an all -1's blob (ignore label)
masks = -blob_utils_ones((1, M**2), int32=True)
# We label it with class = 0 (background)
mask_class_labels = blob_utils_zeros((1, ))
# Mark that the first roi has a mask
roi_has_mask[0] = 1
if cfg.MRCNN.CLS_SPECIFIC_MASK:
masks = _expand_to_class_specific_mask_targets(masks, mask_class_labels)
# Scale rois_fg and format as (batch_idx, x1, y1, x2, y2)
rois_fg *= im_scale
repeated_batch_idx = batch_idx * blob_utils_ones((rois_fg.shape[0], 1))
rois_fg = np.hstack((repeated_batch_idx, rois_fg))
# Update blobs dict with Mask R-CNN blobs
blobs['mask_rois'] = rois_fg
blobs['roi_has_mask_int32'] = roi_has_mask
blobs['masks_int32'] = masks
def blob_utils_ones(shape, int32=False):
"""Return a blob of all ones of the given shape with the correct float or
int data type.
"""
return np.ones(shape, dtype=np.int32 if int32 else np.float32)
def blob_utils_zeros(shape, int32=False):
"""Return a blob of all zeros of the given shape with the correct float or
int data type.
"""
return np.zeros(shape, dtype=np.int32 if int32 else np.float32)
def _expand_bbox_targets(bbox_target_data):
"""Bounding-box regression targets are stored in a compact form in the
roidb.
This function expands those targets into the 4-of-4*K representation used
by the network (i.e. only one class has non-zero targets). The loss weights
are similarly expanded.
Returns:
bbox_target_data (ndarray): N x 4K blob of regression targets
bbox_inside_weights (ndarray): N x 4K blob of loss weights
"""
num_bbox_reg_classes = cfg.MODEL.NUM_CLASSES
if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG:
num_bbox_reg_classes = 2 # bg and fg
clss = bbox_target_data[:, 0]
bbox_targets = blob_utils_zeros((clss.size, 4 * num_bbox_reg_classes))
bbox_inside_weights = blob_utils_zeros(bbox_targets.shape)
inds = np.where(clss > 0)[0]
for ind in inds:
cls = int(clss[ind])
start = 4 * cls
end = start + 4
bbox_targets[ind, start:end] = bbox_target_data[ind, 1:]
bbox_inside_weights[ind, start:end] = (1.0, 1.0, 1.0, 1.0)
return bbox_targets, bbox_inside_weights
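# A minimal sketch of the 4-of-4K expansion above, with an assumed K = 3
# classes in place of cfg.MODEL.NUM_CLASSES: one roi labeled class 2 gets its
# four targets written into columns 8..11 of a 4 * K = 12 wide row, and the
# matching inside weights are set to 1 there; everything else stays 0.
def _example_expand_bbox_targets(num_classes=3):
    compact = np.array([[2., 0.1, -0.2, 0.05, 0.3]], dtype=np.float32)
    expanded = np.zeros((1, 4 * num_classes), dtype=np.float32)
    weights = np.zeros_like(expanded)
    cls = int(compact[0, 0])
    expanded[0, 4 * cls:4 * cls + 4] = compact[0, 1:]
    weights[0, 4 * cls:4 * cls + 4] = 1.0
    return expanded, weights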
def _sample_rois(roidb, im_scale, batch_idx):
"""Generate a random sample of RoIs comprising foreground and background
examples.
"""
rois_per_image = int(cfg.TRAIN.BATCH_SIZE_PER_IM)
fg_rois_per_image = int(np.round(cfg.TRAIN.FG_FRACTION * rois_per_image))
max_overlaps = roidb['max_overlaps']
# Select foreground RoIs as those with >= FG_THRESH overlap
fg_inds = np.where(max_overlaps >= cfg.TRAIN.FG_THRESH)[0]
# Guard against the case when an image has fewer than fg_rois_per_image
# foreground RoIs
fg_rois_per_this_image = np.minimum(fg_rois_per_image, fg_inds.size)
# Sample foreground regions without replacement
if fg_inds.size > 0:
fg_inds = npr.choice(
fg_inds, size=fg_rois_per_this_image, replace=False
)
# Select background RoIs as those within [BG_THRESH_LO, BG_THRESH_HI)
bg_inds = np.where(
(max_overlaps < cfg.TRAIN.BG_THRESH_HI) &
(max_overlaps >= cfg.TRAIN.BG_THRESH_LO)
)[0]
# Compute number of background RoIs to take from this image (guarding
# against there being fewer than desired)
bg_rois_per_this_image = rois_per_image - fg_rois_per_this_image
bg_rois_per_this_image = np.minimum(bg_rois_per_this_image, bg_inds.size)
    # Sample background regions without replacement
if bg_inds.size > 0:
bg_inds = npr.choice(
bg_inds, size=bg_rois_per_this_image, replace=False
)
# The indices that we're selecting (both fg and bg)
keep_inds = np.append(fg_inds, bg_inds)
# Label is the class each RoI has max overlap with
sampled_labels = roidb['max_classes'][keep_inds]
sampled_labels[fg_rois_per_this_image:] = 0 # Label bg RoIs with class 0
sampled_boxes = roidb['boxes'][keep_inds]
bbox_targets, bbox_inside_weights = _expand_bbox_targets(
roidb['bbox_targets'][keep_inds, :]
)
bbox_outside_weights = np.array(
bbox_inside_weights > 0, dtype=bbox_inside_weights.dtype
)
# Scale rois and format as (batch_idx, x1, y1, x2, y2)
sampled_rois = sampled_boxes * im_scale
repeated_batch_idx = batch_idx * blob_utils_ones((sampled_rois.shape[0], 1))
sampled_rois = np.hstack((repeated_batch_idx, sampled_rois))
# Base Fast R-CNN blobs
blob_dict = dict(
labels_int32=sampled_labels.astype(np.int32, copy=False),
rois=sampled_rois,
bbox_targets=bbox_targets,
bbox_inside_weights=bbox_inside_weights,
bbox_outside_weights=bbox_outside_weights
)
# Optionally add Mask R-CNN blobs
if cfg.MODEL.MASK_ON:
mask_rcnn_roi_data_add_mask_rcnn_blobs(
blob_dict, sampled_boxes, roidb, im_scale, batch_idx
)
# Optionally add Keypoint R-CNN blobs
if cfg.MODEL.KEYPOINTS_ON:
keypoint_rcnn_roi_data_add_keypoint_rcnn_blobs(
blob_dict, roidb, fg_rois_per_image, fg_inds, im_scale, batch_idx
)
# Optionally body UV R-CNN blobs
if cfg.MODEL.BODY_UV_ON:
body_uv_rcnn_roi_data_add_body_uv_rcnn_blobs(
blob_dict, sampled_boxes, roidb, im_scale, batch_idx
)
return blob_dict
def fast_rcnn_roi_data_add_fast_rcnn_blobs(blobs, im_scales, roidb):
"""Add blobs needed for training Fast R-CNN style models."""
# Sample training RoIs from each image and append them to the blob lists
for im_i, entry in enumerate(roidb):
frcn_blobs = _sample_rois(entry, im_scales[im_i], im_i)
for k, v in frcn_blobs.items():
blobs[k].append(v)
# Concat the training blob lists into tensors
for k, v in blobs.items():
if isinstance(v, list) and len(v) > 0:
blobs[k] = np.concatenate(v)
# Add FPN multilevel training RoIs, if configured
if cfg.FPN.FPN_ON and cfg.FPN.MULTILEVEL_ROIS:
_add_multilevel_rois(blobs)
# Perform any final work and validity checks after the collating blobs for
# all minibatch images
valid = True
if cfg.MODEL.KEYPOINTS_ON:
valid = keypoint_rcnn_roi_data_finalize_keypoint_minibatch(blobs, valid)
return valid
def box_utils_bbox_transform_inv(boxes, gt_boxes, weights=(1.0, 1.0, 1.0, 1.0)):
"""Inverse transform that computes target bounding-box regression deltas
given proposal boxes and ground-truth boxes. The weights argument should be
a 4-tuple of multiplicative weights that are applied to the regression
target.
In older versions of this code (and in py-faster-rcnn), the weights were set
such that the regression deltas would have unit standard deviation on the
training dataset. Presently, rather than computing these statistics exactly,
we use a fixed set of weights (10., 10., 5., 5.) by default. These are
approximately the weights one would get from COCO using the previous unit
stdev heuristic.
"""
ex_widths = boxes[:, 2] - boxes[:, 0] + 1.0
ex_heights = boxes[:, 3] - boxes[:, 1] + 1.0
ex_ctr_x = boxes[:, 0] + 0.5 * ex_widths
ex_ctr_y = boxes[:, 1] + 0.5 * ex_heights
gt_widths = gt_boxes[:, 2] - gt_boxes[:, 0] + 1.0
gt_heights = gt_boxes[:, 3] - gt_boxes[:, 1] + 1.0
gt_ctr_x = gt_boxes[:, 0] + 0.5 * gt_widths
gt_ctr_y = gt_boxes[:, 1] + 0.5 * gt_heights
wx, wy, ww, wh = weights
targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths
targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights
targets_dw = ww * np.log(gt_widths / ex_widths)
targets_dh = wh * np.log(gt_heights / ex_heights)
targets = np.vstack((targets_dx, targets_dy, targets_dw,
targets_dh)).transpose()
return targets
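# A small worked example of the regression targets above with unit weights
# (boxes are made up): a 10 x 10 proposal at (0, 0, 9, 9) and a 10 x 10
# ground-truth box at (5, 5, 14, 14) differ only by a translation, so
# dx = dy = (10 - 5) / 10 = 0.5 and dw = dh = log(10 / 10) = 0.
def _example_bbox_transform_inv():
    boxes = np.array([[0., 0., 9., 9.]])
    gt_boxes = np.array([[5., 5., 14., 14.]])
    return box_utils_bbox_transform_inv(boxes, gt_boxes, (1., 1., 1., 1.))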
def compute_bbox_regression_targets(entry):
"""Compute bounding-box regression targets for an image."""
# Indices of ground-truth ROIs
rois = entry['boxes']
overlaps = entry['max_overlaps']
labels = entry['max_classes']
gt_inds = np.where((entry['gt_classes'] > 0) & (entry['is_crowd'] == 0))[0]
# Targets has format (class, tx, ty, tw, th)
targets = np.zeros((rois.shape[0], 5), dtype=np.float32)
if len(gt_inds) == 0:
# Bail if the image has no ground-truth ROIs
return targets
# Indices of examples for which we try to make predictions
ex_inds = np.where(overlaps >= cfg.TRAIN.BBOX_THRESH)[0]
# Get IoU overlap between each ex ROI and gt ROI
ex_gt_overlaps = box_utils_bbox_overlaps(
rois[ex_inds, :].astype(dtype=np.float32, copy=False),
rois[gt_inds, :].astype(dtype=np.float32, copy=False))
# Find which gt ROI each ex ROI has max overlap with:
# this will be the ex ROI's gt target
gt_assignment = ex_gt_overlaps.argmax(axis=1)
gt_rois = rois[gt_inds[gt_assignment], :]
ex_rois = rois[ex_inds, :]
# Use class "1" for all boxes if using class_agnostic_bbox_reg
targets[ex_inds, 0] = (
1 if cfg.MODEL.CLS_AGNOSTIC_BBOX_REG else labels[ex_inds])
targets[ex_inds, 1:] = box_utils_bbox_transform_inv(
ex_rois, gt_rois, cfg.MODEL.BBOX_REG_WEIGHTS)
return targets
def roidb_utils_add_bbox_regression_targets(roidb):
"""Add information needed to train bounding-box regressors."""
for entry in roidb:
entry['bbox_targets'] = compute_bbox_regression_targets(entry)
def _add_class_assignments(roidb):
"""Compute object category assignment for each box associated with each
roidb entry.
"""
for entry in roidb:
gt_overlaps = entry['gt_overlaps'].toarray()
# max overlap with gt over classes (columns)
max_overlaps = gt_overlaps.max(axis=1)
# gt class that had the max overlap
max_classes = gt_overlaps.argmax(axis=1)
entry['max_classes'] = max_classes
entry['max_overlaps'] = max_overlaps
# sanity checks
# if max overlap is 0, the class must be background (class 0)
zero_inds = np.where(max_overlaps == 0)[0]
assert all(max_classes[zero_inds] == 0)
# if max overlap > 0, the class must be a fg class (not class 0)
nonzero_inds = np.where(max_overlaps > 0)[0]
assert all(max_classes[nonzero_inds] != 0)
def box_utils_xyxy_to_xywh(xyxy):
"""Convert [x1 y1 x2 y2] box format to [x1 y1 w h] format."""
if isinstance(xyxy, (list, tuple)):
# Single box given as a list of coordinates
assert len(xyxy) == 4
x1, y1 = xyxy[0], xyxy[1]
w = xyxy[2] - x1 + 1
h = xyxy[3] - y1 + 1
return (x1, y1, w, h)
elif isinstance(xyxy, np.ndarray):
# Multiple boxes given as a 2D ndarray
return np.hstack((xyxy[:, 0:2], xyxy[:, 2:4] - xyxy[:, 0:2] + 1))
else:
raise TypeError('Argument xyxy must be a list, tuple, or numpy array.')
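# Two quick sanity checks for the box-format conversion above; note the
# inclusive-pixel convention, under which a single-pixel box has w = h = 1.
def _example_xyxy_to_xywh():
    assert box_utils_xyxy_to_xywh((3, 4, 3, 4)) == (3, 4, 1, 1)
    assert box_utils_xyxy_to_xywh((0, 0, 9, 19)) == (0, 0, 10, 20)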
def _filter_crowd_proposals(roidb, crowd_thresh):
"""Finds proposals that are inside crowd regions and marks them as
    overlap = -1 with each ground-truth roi, which means they will be excluded
from training.
"""
for entry in roidb:
gt_overlaps = entry['gt_overlaps'].toarray()
crowd_inds = np.where(entry['is_crowd'] == 1)[0]
non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
if len(crowd_inds) == 0 or len(non_gt_inds) == 0:
continue
crowd_boxes = box_utils_xyxy_to_xywh(entry['boxes'][crowd_inds, :])
non_gt_boxes = box_utils_xyxy_to_xywh(entry['boxes'][non_gt_inds, :])
iscrowd_flags = [int(True)] * len(crowd_inds)
ious = COCOmask.iou(non_gt_boxes, crowd_boxes, iscrowd_flags)
bad_inds = np.where(ious.max(axis=1) > crowd_thresh)[0]
gt_overlaps[non_gt_inds[bad_inds], :] = -1
entry['gt_overlaps'] = scipy.sparse.csr_matrix(gt_overlaps)
def _merge_proposal_boxes_into_roidb(roidb, box_list):
"""Add proposal boxes to each roidb entry."""
assert len(box_list) == len(roidb)
for i, entry in enumerate(roidb):
boxes = box_list[i]
num_boxes = boxes.shape[0]
gt_overlaps = np.zeros(
(num_boxes, entry['gt_overlaps'].shape[1]),
dtype=entry['gt_overlaps'].dtype
)
box_to_gt_ind_map = -np.ones(
(num_boxes), dtype=entry['box_to_gt_ind_map'].dtype
)
# Note: unlike in other places, here we intentionally include all gt
# rois, even ones marked as crowd. Boxes that overlap with crowds will
# be filtered out later (see: _filter_crowd_proposals).
gt_inds = np.where(entry['gt_classes'] > 0)[0]
if len(gt_inds) > 0:
gt_boxes = entry['boxes'][gt_inds, :]
gt_classes = entry['gt_classes'][gt_inds]
proposal_to_gt_overlaps = box_utils_bbox_overlaps(
boxes.astype(dtype=np.float32, copy=False),
gt_boxes.astype(dtype=np.float32, copy=False)
)
# Gt box that overlaps each input box the most
# (ties are broken arbitrarily by class order)
argmaxes = proposal_to_gt_overlaps.argmax(axis=1)
# Amount of that overlap
maxes = proposal_to_gt_overlaps.max(axis=1)
# Those boxes with non-zero overlap with gt boxes
I = np.where(maxes > 0)[0]
# Record max overlaps with the class of the appropriate gt box
gt_overlaps[I, gt_classes[argmaxes[I]]] = maxes[I]
box_to_gt_ind_map[I] = gt_inds[argmaxes[I]]
entry['boxes'] = np.append(
entry['boxes'],
boxes.astype(entry['boxes'].dtype, copy=False),
axis=0
)
entry['gt_classes'] = np.append(
entry['gt_classes'],
np.zeros((num_boxes), dtype=entry['gt_classes'].dtype)
)
entry['seg_areas'] = np.append(
entry['seg_areas'],
np.zeros((num_boxes), dtype=entry['seg_areas'].dtype)
)
entry['gt_overlaps'] = np.append(
entry['gt_overlaps'].toarray(), gt_overlaps, axis=0
)
entry['gt_overlaps'] = scipy.sparse.csr_matrix(entry['gt_overlaps'])
entry['is_crowd'] = np.append(
entry['is_crowd'],
np.zeros((num_boxes), dtype=entry['is_crowd'].dtype)
)
entry['box_to_gt_ind_map'] = np.append(
entry['box_to_gt_ind_map'],
box_to_gt_ind_map.astype(
entry['box_to_gt_ind_map'].dtype, copy=False
)
)
def json_dataset_add_proposals(roidb, rois, scales, crowd_thresh):
"""Add proposal boxes (rois) to an roidb that has ground-truth annotations
but no proposals. If the proposals are not at the original image scale,
    specify the scale factor that separates them in scales.
"""
box_list = []
for i in range(len(roidb)):
inv_im_scale = 1. / scales[i]
idx = np.where(rois[:, 0] == i)[0]
box_list.append(rois[idx, 1:] * inv_im_scale)
_merge_proposal_boxes_into_roidb(roidb, box_list)
if crowd_thresh > 0:
_filter_crowd_proposals(roidb, crowd_thresh)
_add_class_assignments(roidb)
def blob_utils_deserialize(arr):
"""Unserialize a Python object from an array of float32 values fetched from
a workspace. See serialize().
"""
return pickle.loads(arr.astype(np.uint8).tobytes())
def box_utils_boxes_area(boxes):
"""Compute the area of an array of boxes."""
w = (boxes[:, 2] - boxes[:, 0] + 1)
h = (boxes[:, 3] - boxes[:, 1] + 1)
areas = w * h
    assert np.all(areas >= 0), 'Negative areas found'
return areas
def fpn_map_rois_to_fpn_levels(rois, k_min, k_max):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
# Compute level ids
s = np.sqrt(box_utils_boxes_area(rois))
s0 = cfg.FPN.ROI_CANONICAL_SCALE # default: 224
lvl0 = cfg.FPN.ROI_CANONICAL_LEVEL # default: 4
# Eqn.(1) in FPN paper
target_lvls = np.floor(lvl0 + np.log2(s / s0 + 1e-6))
target_lvls = np.clip(target_lvls, k_min, k_max)
return target_lvls
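# A small worked example of the assignment heuristic above, assuming the usual
# canonical scale s0 = 224 and canonical level lvl0 = 4 (the real values come
# from cfg.FPN.ROI_CANONICAL_SCALE / ROI_CANONICAL_LEVEL): an roi with
# sqrt(area) = 112 maps to floor(4 + log2(112 / 224)) = 3, one with
# sqrt(area) = 448 maps to level 5, and the result is clipped to [k_min, k_max].
def _example_fpn_level_for_roi(sqrt_area, s0=224., lvl0=4, k_min=2, k_max=5):
    lvl = np.floor(lvl0 + np.log2(sqrt_area / s0 + 1e-6))
    return int(np.clip(lvl, k_min, k_max))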
def distribute(rois, label_blobs, outputs, train):
"""To understand the output blob order see return value of
detectron.roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)
"""
lvl_min = cfg.FPN.ROI_MIN_LEVEL
lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvls = fpn_map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
# Create new roi blobs for each FPN level
# (See: modeling.FPN.add_multilevel_roi_blobs which is similar but annoying
# to generalize to support this particular case.)
rois_idx_order = np.empty((0, ))
for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):
idx_lvl = np.where(lvls == lvl)[0]
blob_roi_level = rois[idx_lvl, :]
outputs[output_idx + 1].reshape(blob_roi_level.shape)
outputs[output_idx + 1].data[...] = blob_roi_level
rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
rois_idx_restore = np.argsort(rois_idx_order)
blob_utils_py_op_copy_blob(rois_idx_restore.astype(np.int32), outputs[-1])
def collect(inputs, is_training):
cfg_key = 'TRAIN' if is_training else 'TEST'
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
num_lvls = k_max - k_min + 1
roi_inputs = inputs[:num_lvls]
score_inputs = inputs[num_lvls:]
if is_training:
score_inputs = score_inputs[:-2]
    # rois are in [[batch_idx, x0, y0, x1, y1], ...] format
# Combine predictions across all levels and retain the top scoring
rois = np.concatenate([blob.data for blob in roi_inputs])
scores = np.concatenate([blob.data for blob in score_inputs]).squeeze()
inds = np.argsort(-scores)[:post_nms_topN]
rois = rois[inds, :]
return rois
def box_utils_bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
"""Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas. See bbox_transform_inv for a
description of the weights argument.
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into np.exp()
dw = np.minimum(dw, cfg.BBOX_XFORM_CLIP)
dh = np.minimum(dh, cfg.BBOX_XFORM_CLIP)
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
# y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
return pred_boxes
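# A minimal round-trip sketch tying box_utils_bbox_transform to
# box_utils_bbox_transform_inv (boxes are made up; assumes the module's cfg is
# initialized, since bbox_transform reads cfg.BBOX_XFORM_CLIP): applying the
# deltas computed by the inverse transform recovers the ground-truth box up to
# floating point error.
def _example_bbox_transform_roundtrip():
    boxes = np.array([[0., 0., 9., 19.]])
    gt_boxes = np.array([[2., 3., 13., 18.]])
    deltas = box_utils_bbox_transform_inv(boxes, gt_boxes, (1., 1., 1., 1.))
    pred = box_utils_bbox_transform(boxes, deltas, (1., 1., 1., 1.))
    assert np.allclose(pred, gt_boxes)
    return pred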
def c2_utils_CudaDevice(gpu_id):
"""Create a Cuda device."""
return core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
@contextlib.contextmanager
def c2_utils_CudaScope(gpu_id):
"""Create a CUDA device scope for GPU device `gpu_id`."""
gpu_dev = c2_utils_CudaDevice(gpu_id)
with core.DeviceScope(gpu_dev):
yield
@contextlib.contextmanager
def c2_utils_GpuNameScope(gpu_id):
"""Create a name scope for GPU device `gpu_id`."""
with core.NameScope('gpu_{:d}'.format(gpu_id)):
yield
@contextlib.contextmanager
def c2_utils_NamedCudaScope(gpu_id):
"""Creates a GPU name scope and CUDA device scope. This function is provided
to reduce `with ...` nesting levels."""
with c2_utils_GpuNameScope(gpu_id):
with c2_utils_CudaScope(gpu_id):
yield
@contextlib.contextmanager
def c2_utils_CpuScope():
"""Create a CPU device scope."""
cpu_dev = core.DeviceOption(caffe2_pb2.CPU)
with core.DeviceScope(cpu_dev):
yield
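# A minimal usage sketch for the device-scope helpers above, assuming a
# CUDA-enabled Caffe2 build: inside c2_utils_NamedCudaScope(0), scoped blob
# names resolve to 'gpu_0/...' and the CUDA device scope for GPU 0 is active.
def _example_feed_blob_on_gpu_0(value):
    with c2_utils_NamedCudaScope(0):
        # core.ScopedName resolves to 'gpu_0/example_data' inside this scope
        workspace.FeedBlob(core.ScopedName('example_data'), value)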
class DensePoseMethods:
def __init__(self):
#
ALP_UV = loadmat( os.path.join(os.path.dirname(__file__), 'assets/UV_Processed.mat') )
self.FaceIndices = np.array( ALP_UV['All_FaceIndices']).squeeze()
self.FacesDensePose = ALP_UV['All_Faces']-1
self.U_norm = ALP_UV['All_U_norm'].squeeze()
self.V_norm = ALP_UV['All_V_norm'].squeeze()
self.All_vertices = ALP_UV['All_vertices'][0]
## Info to compute symmetries.
self.SemanticMaskSymmetries = [0,1,3,2,5,4,7,6,9,8,11,10,13,12,14]
self.Index_Symmetry_List = [1,2,4,3,6,5,8,7,10,9,12,11,14,13,16,15,18,17,20,19,22,21,24,23];
UV_symmetry_filename = os.path.join(os.path.dirname(__file__), 'assets/UV_symmetry_transforms.mat')
self.UV_symmetry_transformations = loadmat( UV_symmetry_filename )
def get_symmetric_densepose(self,I,U,V,x,y,Mask):
### This is a function to get the mirror symmetric UV labels.
Labels_sym= np.zeros(I.shape)
U_sym= np.zeros(U.shape)
V_sym= np.zeros(V.shape)
###
for i in ( range(24)):
if i+1 in I:
Labels_sym[I == (i+1)] = self.Index_Symmetry_List[i]
jj = np.where(I == (i+1))
###
U_loc = (U[jj]*255).astype(np.int64)
V_loc = (V[jj]*255).astype(np.int64)
###
V_sym[jj] = self.UV_symmetry_transformations['V_transforms'][0,i][V_loc,U_loc]
U_sym[jj] = self.UV_symmetry_transformations['U_transforms'][0,i][V_loc,U_loc]
##
Mask_flip = np.fliplr(Mask)
Mask_flipped = np.zeros(Mask.shape)
#
for i in ( range(14)):
Mask_flipped[Mask_flip == (i+1)] = self.SemanticMaskSymmetries[i+1]
#
[y_max , x_max ] = Mask_flip.shape
y_sym = y
x_sym = x_max-x
#
return Labels_sym , U_sym , V_sym , x_sym , y_sym , Mask_flipped
def barycentric_coordinates_exists(self,P0, P1, P2, P):
u = P1 - P0
v = P2 - P0
w = P - P0
#
vCrossW = np.cross(v,w)
vCrossU = np.cross(v, u)
if (np.dot(vCrossW, vCrossU) < 0):
return False;
#
uCrossW = np.cross(u, w)
uCrossV = np.cross(u, v)
#
if (np.dot(uCrossW, uCrossV) < 0):
return False;
#
denom = np.sqrt((uCrossV**2).sum())
r = np.sqrt((vCrossW**2).sum())/denom
t = np.sqrt((uCrossW**2).sum())/denom
#
return((r <=1) & (t <= 1) & (r + t <= 1))
def barycentric_coordinates(self,P0, P1, P2, P):
u = P1 - P0
v = P2 - P0
w = P - P0
#
vCrossW = np.cross(v,w)
vCrossU = np.cross(v, u)
#
uCrossW = np.cross(u, w)
uCrossV = np.cross(u, v)
#
denom = np.sqrt((uCrossV**2).sum())
r = np.sqrt((vCrossW**2).sum())/denom
t = np.sqrt((uCrossW**2).sum())/denom
#
return(1-(r+t),r,t)
def IUV2FBC( self, I_point , U_point, V_point):
P = [ U_point , V_point , 0 ]
FaceIndicesNow = np.where( self.FaceIndices == I_point )
FacesNow = self.FacesDensePose[FaceIndicesNow]
#
P_0 = np.vstack( (self.U_norm[FacesNow][:,0], self.V_norm[FacesNow][:,0], np.zeros(self.U_norm[FacesNow][:,0].shape))).transpose()
P_1 = np.vstack( (self.U_norm[FacesNow][:,1], self.V_norm[FacesNow][:,1], np.zeros(self.U_norm[FacesNow][:,1].shape))).transpose()
P_2 = np.vstack( (self.U_norm[FacesNow][:,2], self.V_norm[FacesNow][:,2], np.zeros(self.U_norm[FacesNow][:,2].shape))).transpose()
#
for i, [P0,P1,P2] in enumerate( zip(P_0,P_1,P_2)) :
if(self.barycentric_coordinates_exists(P0, P1, P2, P)):
[bc1,bc2,bc3] = self.barycentric_coordinates(P0, P1, P2, P)
return(FaceIndicesNow[0][i],bc1,bc2,bc3)
#
# If the found UV is not inside any faces, select the vertex that is closest!
#
D1 = scipy.spatial.distance.cdist( np.array( [U_point,V_point])[np.newaxis,:] , P_0[:,0:2]).squeeze()
D2 = scipy.spatial.distance.cdist( np.array( [U_point,V_point])[np.newaxis,:] , P_1[:,0:2]).squeeze()
D3 = scipy.spatial.distance.cdist( np.array( [U_point,V_point])[np.newaxis,:] , P_2[:,0:2]).squeeze()
#
minD1 = D1.min()
minD2 = D2.min()
minD3 = D3.min()
#
if((minD1< minD2) & (minD1< minD3)):
return( FaceIndicesNow[0][np.argmin(D1)] , 1.,0.,0. )
elif((minD2< minD1) & (minD2< minD3)):
return( FaceIndicesNow[0][np.argmin(D2)] , 0.,1.,0. )
else:
return( FaceIndicesNow[0][np.argmin(D3)] , 0.,0.,1. )
def FBC2PointOnSurface( self, FaceIndex, bc1,bc2,bc3,Vertices ):
##
Vert_indices = self.All_vertices[self.FacesDensePose[FaceIndex]]-1
##
p = Vertices[Vert_indices[0],:] * bc1 + \
Vertices[Vert_indices[1],:] * bc2 + \
Vertices[Vert_indices[2],:] * bc3
##
return(p)
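# A minimal sketch of the barycentric computation used by DensePoseMethods
# above, on a made-up triangle in the UV plane (no UV_Processed.mat assets are
# needed for this): the centroid of any triangle has barycentric coordinates
# (1/3, 1/3, 1/3).
def _example_barycentric_centroid():
    P0, P1, P2 = np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([0., 1., 0.])
    P = (P0 + P1 + P2) / 3.0  # centroid
    u, v, w = P1 - P0, P2 - P0, P - P0
    denom = np.sqrt((np.cross(u, v) ** 2).sum())
    r = np.sqrt((np.cross(v, w) ** 2).sum()) / denom
    t = np.sqrt((np.cross(u, w) ** 2).sum()) / denom
    return (1 - (r + t), r, t)  # ~ (1/3, 1/3, 1/3)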
class CollectAndDistributeFpnRpnProposalsOp(object):
def __init__(self, train):
self._train = train
def forward(self, inputs, outputs):
"""See modeling.detector.CollectAndDistributeFpnRpnProposals for
inputs/outputs documentation.
"""
# inputs is
# [rpn_rois_fpn2, ..., rpn_rois_fpn6,
# rpn_roi_probs_fpn2, ..., rpn_roi_probs_fpn6]
# If training with Faster R-CNN, then inputs will additionally include
# + [roidb, im_info]
rois = collect(inputs, self._train)
if self._train:
# During training we reuse the data loader code. We populate roidb
# entries on the fly using the rois generated by RPN.
# im_info: [[im_height, im_width, im_scale], ...]
im_info = inputs[-1].data
im_scales = im_info[:, 2]
roidb = blob_utils_deserialize(inputs[-2].data)
# For historical consistency with the original Faster R-CNN
# implementation we are *not* filtering crowd proposals.
# This choice should be investigated in the future (it likely does
# not matter).
json_dataset_add_proposals(roidb, rois, im_scales, crowd_thresh=0)
roidb_utils_add_bbox_regression_targets(roidb)
# Compute training labels for the RPN proposals; also handles
# distributing the proposals over FPN levels
output_blob_names = fast_rcnn_roi_data_get_fast_rcnn_blob_names()
blobs = {k: [] for k in output_blob_names}
fast_rcnn_roi_data_add_fast_rcnn_blobs(blobs, im_scales, roidb)
for i, k in enumerate(output_blob_names):
blob_utils_py_op_copy_blob(blobs[k], outputs[i])
else:
# For inference we have a special code path that avoids some data
# loader overhead
distribute(rois, None, outputs, self._train)
class GenerateProposalLabelsOp(object):
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposalLabels for inputs/outputs
documentation.
"""
# During training we reuse the data loader code. We populate roidb
# entries on the fly using the rois generated by RPN.
# im_info: [[im_height, im_width, im_scale], ...]
rois = inputs[0].data
roidb = blob_utils_deserialize(inputs[1].data)
im_info = inputs[2].data
im_scales = im_info[:, 2]
output_blob_names = fast_rcnn_roi_data_get_fast_rcnn_blob_names()
# For historical consistency with the original Faster R-CNN
# implementation we are *not* filtering crowd proposals.
# This choice should be investigated in the future (it likely does
# not matter).
json_dataset_add_proposals(roidb, rois, im_scales, crowd_thresh=0)
roidb_utils_add_bbox_regression_targets(roidb)
blobs = {k: [] for k in output_blob_names}
fast_rcnn_roi_data_add_fast_rcnn_blobs(blobs, im_scales, roidb)
for i, k in enumerate(output_blob_names):
blob_utils_py_op_copy_blob(blobs[k], outputs[i])
class GenerateProposalsOp(object):
"""Output object detection proposals by applying estimated bounding-box
transformations to a set of regular boxes (called "anchors").
"""
def __init__(self, anchors, spatial_scale, train):
self._anchors = anchors
self._num_anchors = self._anchors.shape[0]
self._feat_stride = 1. / spatial_scale
self._train = train
def forward(self, inputs, outputs):
"""See modeling.detector.GenerateProposals for inputs/outputs
documentation.
"""
# 1. for each location i in a (H, W) grid:
# generate A anchor boxes centered on cell i
# apply predicted bbox deltas to each of the A anchors at cell i
# 2. clip predicted boxes to image
# 3. remove predicted boxes with either height or width < threshold
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take the top pre_nms_topN proposals before NMS
# 6. apply NMS with a loose threshold (0.7) to the remaining proposals
# 7. take after_nms_topN proposals after NMS
# 8. return the top proposals
# predicted probability of fg object for each RPN anchor
scores = inputs[0].data
        # predicted anchor transformations
bbox_deltas = inputs[1].data
# input image (height, width, scale), in which scale is the scale factor
# applied to the original dataset image to get the network input image
im_info = inputs[2].data
# 1. Generate proposals from bbox deltas and shifted anchors
height, width = scores.shape[-2:]
# Enumerate all shifted positions on the (H, W) grid
shift_x = np.arange(0, width) * self._feat_stride
shift_y = np.arange(0, height) * self._feat_stride
shift_x, shift_y = np.meshgrid(shift_x, shift_y, copy=False)
# Convert to (K, 4), K=H*W, where the columns are (dx, dy, dx, dy)
# shift pointing to each grid location
shifts = np.vstack((shift_x.ravel(), shift_y.ravel(),
shift_x.ravel(), shift_y.ravel())).transpose()
        # Broadcast anchors over shifts to enumerate all anchors at all positions
# in the (H, W) grid:
# - add A anchors of shape (1, A, 4) to
# - K shifts of shape (K, 1, 4) to get
# - all shifted anchors of shape (K, A, 4)
# - reshape to (K*A, 4) shifted anchors
num_images = inputs[0].shape[0]
A = self._num_anchors
K = shifts.shape[0]
all_anchors = self._anchors[np.newaxis, :, :] + shifts[:, np.newaxis, :]
all_anchors = all_anchors.reshape((K * A, 4))
rois = np.empty((0, 5), dtype=np.float32)
roi_probs = np.empty((0, 1), dtype=np.float32)
for im_i in range(num_images):
im_i_boxes, im_i_probs = self.proposals_for_one_image(
im_info[im_i, :], all_anchors, bbox_deltas[im_i, :, :, :],
scores[im_i, :, :, :]
)
batch_inds = im_i * np.ones(
(im_i_boxes.shape[0], 1), dtype=np.float32
)
im_i_rois = np.hstack((batch_inds, im_i_boxes))
rois = np.append(rois, im_i_rois, axis=0)
roi_probs = np.append(roi_probs, im_i_probs, axis=0)
outputs[0].reshape(rois.shape)
outputs[0].data[...] = rois
if len(outputs) > 1:
outputs[1].reshape(roi_probs.shape)
outputs[1].data[...] = roi_probs
def proposals_for_one_image(
self, im_info, all_anchors, bbox_deltas, scores
):
# Get mode-dependent configuration
cfg_key = 'TRAIN' if self._train else 'TEST'
pre_nms_topN = cfg[cfg_key].RPN_PRE_NMS_TOP_N
post_nms_topN = cfg[cfg_key].RPN_POST_NMS_TOP_N
nms_thresh = cfg[cfg_key].RPN_NMS_THRESH
min_size = cfg[cfg_key].RPN_MIN_SIZE
# Transpose and reshape predicted bbox transformations to get them
# into the same order as the anchors:
# - bbox deltas will be (4 * A, H, W) format from conv output
# - transpose to (H, W, 4 * A)
# - reshape to (H * W * A, 4) where rows are ordered by (H, W, A)
# in slowest to fastest order to match the enumerated anchors
bbox_deltas = bbox_deltas.transpose((1, 2, 0)).reshape((-1, 4))
# Same story for the scores:
# - scores are (A, H, W) format from conv output
# - transpose to (H, W, A)
# - reshape to (H * W * A, 1) where rows are ordered by (H, W, A)
# to match the order of anchors and bbox_deltas
scores = scores.transpose((1, 2, 0)).reshape((-1, 1))
# 4. sort all (proposal, score) pairs by score from highest to lowest
# 5. take top pre_nms_topN (e.g. 6000)
if pre_nms_topN <= 0 or pre_nms_topN >= len(scores):
order = np.argsort(-scores.squeeze())
else:
# Avoid sorting possibly large arrays; First partition to get top K
# unsorted and then sort just those (~20x faster for 200k scores)
inds = np.argpartition(
-scores.squeeze(), pre_nms_topN
)[:pre_nms_topN]
order = np.argsort(-scores[inds].squeeze())
order = inds[order]
bbox_deltas = bbox_deltas[order, :]
all_anchors = all_anchors[order, :]
scores = scores[order]
# Transform anchors into proposals via bbox transformations
proposals = box_utils_bbox_transform(
all_anchors, bbox_deltas, (1.0, 1.0, 1.0, 1.0))
# 2. clip proposals to image (may result in proposals with zero area
# that will be removed in the next step)
proposals = box_utils_clip_tiled_boxes(proposals, im_info[:2])
# 3. remove predicted boxes with either height or width < min_size
keep = _filter_boxes(proposals, min_size, im_info)
proposals = proposals[keep, :]
scores = scores[keep]
# 6. apply loose nms (e.g. threshold = 0.7)
# 7. take after_nms_topN (e.g. 300)
# 8. return the top proposals (-> RoIs top)
if nms_thresh > 0:
keep = box_utils_nms(np.hstack((proposals, scores)), nms_thresh)
if post_nms_topN > 0:
keep = keep[:post_nms_topN]
proposals = proposals[keep, :]
scores = scores[keep]
return proposals, scores
class DetectionModelHelper(cnn.CNNModelHelper):
def __init__(self, **kwargs):
# Handle args specific to the DetectionModelHelper, others pass through
# to CNNModelHelper
self.train = kwargs.get('train', False)
self.num_classes = kwargs.get('num_classes', -1)
assert self.num_classes > 0, 'num_classes must be > 0'
for k in ('train', 'num_classes'):
if k in kwargs:
del kwargs[k]
kwargs['order'] = 'NCHW'
# Defensively set cudnn_exhaustive_search to False in case the default
# changes in CNNModelHelper. The detection code uses variable size
# inputs that might not play nicely with cudnn_exhaustive_search.
kwargs['cudnn_exhaustive_search'] = False
super(DetectionModelHelper, self).__init__(**kwargs)
self.roi_data_loader = None
self.losses = []
self.metrics = []
        self.do_not_update_params = []  # Params on this list are not updated
self.net.Proto().type = cfg.MODEL.EXECUTION_TYPE
self.net.Proto().num_workers = cfg.NUM_GPUS * 4
self.prev_use_cudnn = self.use_cudnn
        self.gn_params = []  # Params on this list are GroupNorm parameters
def TrainableParams(self, gpu_id=-1):
"""Get the blob names for all trainable parameters, possibly filtered by
GPU id.
"""
return [
p for p in self.params
if (
p in self.param_to_grad and # p has a gradient
p not in self.do_not_update_params and # not on the blacklist
(gpu_id == -1 or # filter for gpu assignment, if gpu_id set
str(p).find('gpu_{}'.format(gpu_id)) == 0)
)]
def AffineChannel(self, blob_in, blob_out, dim, inplace=False):
"""Affine transformation to replace BN in networks where BN cannot be
used (e.g., because the minibatch size is too small).
The operations can be done in place to save memory.
"""
blob_out = blob_out or self.net.NextName()
param_prefix = blob_out
scale = self.create_param(
param_name=param_prefix + '_s',
initializer=initializers.Initializer("ConstantFill", value=1.),
tags=ParameterTags.WEIGHT,
shape=[dim, ],
)
bias = self.create_param(
param_name=param_prefix + '_b',
initializer=initializers.Initializer("ConstantFill", value=0.),
tags=ParameterTags.BIAS,
shape=[dim, ],
)
if inplace:
return self.net.AffineChannel([blob_in, scale, bias], blob_in)
else:
return self.net.AffineChannel([blob_in, scale, bias], blob_out)
def GenerateProposals(self, blobs_in, blobs_out, anchors, spatial_scale):
"""Op for generating RPN porposals.
blobs_in:
- 'rpn_cls_probs': 4D tensor of shape (N, A, H, W), where N is the
number of minibatch images, A is the number of anchors per
locations, and (H, W) is the spatial size of the prediction grid.
Each value represents a "probability of object" rating in [0, 1].
- 'rpn_bbox_pred': 4D tensor of shape (N, 4 * A, H, W) of predicted
            deltas for transforming anchor boxes into RPN proposals.
- 'im_info': 2D tensor of shape (N, 3) where the three columns encode
the input image's [height, width, scale]. Height and width are
for the input to the network, not the original image; scale is the
scale factor used to scale the original image to the network input
size.
blobs_out:
- 'rpn_rois': 2D tensor of shape (R, 5), for R RPN proposals where the
five columns encode [batch ind, x1, y1, x2, y2]. The boxes are
w.r.t. the network input, which is a *scaled* version of the
original image; these proposals must be scaled by 1 / scale (where
            scale comes from im_info; see above) to transform them back to the
original input image coordinate system.
- 'rpn_roi_probs': 1D tensor of objectness probability scores
(extracted from rpn_cls_probs; see above).
"""
name = 'GenerateProposalsOp:' + ','.join([str(b) for b in blobs_in])
# spatial_scale passed to the Python op is only used in convert_pkl_to_pb
self.net.Python(
GenerateProposalsOp(anchors, spatial_scale, self.train).forward
)(blobs_in, blobs_out, name=name, spatial_scale=spatial_scale)
return blobs_out
def GenerateProposalLabels(self, blobs_in):
"""Op for generating training labels for RPN proposals. This is used
when training RPN jointly with Fast/Mask R-CNN (as in end-to-end
Faster R-CNN training).
blobs_in:
- 'rpn_rois': 2D tensor of RPN proposals output by GenerateProposals
- 'roidb': roidb entries that will be labeled
- 'im_info': See GenerateProposals doc.
blobs_out:
- (variable set of blobs): returns whatever blobs are required for
training the model. It does this by querying the data loader for
the list of blobs that are needed.
"""
name = 'GenerateProposalLabelsOp:' + ','.join(
[str(b) for b in blobs_in]
)
# The list of blobs is not known before run-time because it depends on
# the specific model being trained. Query the data loader to get the
# list of output blob names.
blobs_out = fast_rcnn_roi_data_get_fast_rcnn_blob_names(
is_training=self.train
)
blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]
self.net.Python(GenerateProposalLabelsOp().forward)(
blobs_in, blobs_out, name=name
)
return blobs_out
def CollectAndDistributeFpnRpnProposals(self):
"""Merge RPN proposals generated at multiple FPN levels and then
distribute those proposals to their appropriate FPN levels. An anchor
at one FPN level may predict an RoI that will map to another level,
hence the need to redistribute the proposals.
This function assumes standard blob names for input and output blobs.
Input blobs: [rpn_rois_fpn<min>, ..., rpn_rois_fpn<max>,
rpn_roi_probs_fpn<min>, ..., rpn_roi_probs_fpn<max>]
- rpn_rois_fpn<i> are the RPN proposals for FPN level i; see rpn_rois
documentation from GenerateProposals.
- rpn_roi_probs_fpn<i> are the RPN objectness probabilities for FPN
level i; see rpn_roi_probs documentation from GenerateProposals.
If used during training, then the input blobs will also include:
[roidb, im_info] (see GenerateProposalLabels).
        Output blobs: [rois_fpn<min>, ..., rois_fpn<max>, rois,
rois_idx_restore]
- rois_fpn<i> are the RPN proposals for FPN level i
- rois_idx_restore is a permutation on the concatenation of all
rois_fpn<i>, i=min...max, such that when applied the RPN RoIs are
restored to their original order in the input blobs.
If used during training, then the output blobs will also include:
[labels, bbox_targets, bbox_inside_weights, bbox_outside_weights].
"""
k_max = cfg.FPN.RPN_MAX_LEVEL
k_min = cfg.FPN.RPN_MIN_LEVEL
# Prepare input blobs
rois_names = ['rpn_rois_fpn' + str(l) for l in range(k_min, k_max + 1)]
score_names = [
'rpn_roi_probs_fpn' + str(l) for l in range(k_min, k_max + 1)
]
blobs_in = rois_names + score_names
if self.train:
blobs_in += ['roidb', 'im_info']
blobs_in = [core.ScopedBlobReference(b) for b in blobs_in]
name = 'CollectAndDistributeFpnRpnProposalsOp:' + ','.join(
[str(b) for b in blobs_in]
)
# Prepare output blobs
blobs_out = fast_rcnn_roi_data_get_fast_rcnn_blob_names(
is_training=self.train
)
blobs_out = [core.ScopedBlobReference(b) for b in blobs_out]
outputs = self.net.Python(
CollectAndDistributeFpnRpnProposalsOp(self.train).forward
)(blobs_in, blobs_out, name=name)
return outputs
def DropoutIfTraining(self, blob_in, dropout_rate):
"""Add dropout to blob_in if the model is in training mode and
dropout_rate is > 0."""
blob_out = blob_in
if self.train and dropout_rate > 0:
blob_out = self.Dropout(
blob_in, blob_in, ratio=dropout_rate, is_test=False
)
return blob_out
def RoIFeatureTransform(
self,
blobs_in,
blob_out,
blob_rois='rois',
method='RoIPoolF',
resolution=7,
spatial_scale=1. / 16.,
sampling_ratio=0
):
"""Add the specified RoI pooling method. The sampling_ratio argument
is supported for some, but not all, RoI transform methods.
RoIFeatureTransform abstracts away:
- Use of FPN or not
- Specifics of the transform method
"""
assert method in {'RoIPoolF', 'RoIAlign'}, \
'Unknown pooling method: {}'.format(method)
has_argmax = (method == 'RoIPoolF')
if isinstance(blobs_in, list):
# FPN case: add RoIFeatureTransform to each FPN level
k_max = cfg.FPN.ROI_MAX_LEVEL # coarsest level of pyramid
k_min = cfg.FPN.ROI_MIN_LEVEL # finest level of pyramid
assert len(blobs_in) == k_max - k_min + 1
bl_out_list = []
for lvl in range(k_min, k_max + 1):
bl_in = blobs_in[k_max - lvl] # blobs_in is in reversed order
sc = spatial_scale[k_max - lvl] # in reversed order
bl_rois = blob_rois + '_fpn' + str(lvl)
bl_out = blob_out + '_fpn' + str(lvl)
bl_out_list.append(bl_out)
bl_argmax = ['_argmax_' + bl_out] if has_argmax else []
self.net.__getattr__(method)(
[bl_in, bl_rois], [bl_out] + bl_argmax,
pooled_w=resolution,
pooled_h=resolution,
spatial_scale=sc,
sampling_ratio=sampling_ratio
)
# The pooled features from all levels are concatenated along the
# batch dimension into a single 4D tensor.
xform_shuffled, _ = self.net.Concat(
bl_out_list, [blob_out + '_shuffled', '_concat_' + blob_out],
axis=0
)
# Unshuffle to match rois from dataloader
restore_bl = blob_rois + '_idx_restore_int32'
xform_out = self.net.BatchPermutation(
[xform_shuffled, restore_bl], blob_out
)
else:
# Single feature level
bl_argmax = ['_argmax_' + blob_out] if has_argmax else []
# sampling_ratio is ignored for RoIPoolF
xform_out = self.net.__getattr__(method)(
[blobs_in, blob_rois], [blob_out] + bl_argmax,
pooled_w=resolution,
pooled_h=resolution,
spatial_scale=spatial_scale,
sampling_ratio=sampling_ratio
)
# Only return the first blob (the transformed features)
return xform_out[0] if isinstance(xform_out, tuple) else xform_out
def ConvShared(
self,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight=None,
bias=None,
**kwargs
):
"""Add conv op that shares weights and/or biases with another conv op.
"""
use_bias = (
False if ('no_bias' in kwargs and kwargs['no_bias']) else True
)
if self.use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = self.cudnn_exhaustive_search
if self.ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = self.ws_nbytes_limit
if use_bias:
blobs_in = [blob_in, weight, bias]
else:
blobs_in = [blob_in, weight]
if 'no_bias' in kwargs:
del kwargs['no_bias']
return self.net.Conv(
blobs_in, blob_out, kernel=kernel, order=self.order, **kwargs
)
def BilinearInterpolation(
self, blob_in, blob_out, dim_in, dim_out, up_scale
):
"""Bilinear interpolation in space of scale.
Takes input of NxKxHxW and outputs NxKx(sH)x(sW), where s:= up_scale
Adapted from the CVPR'15 FCN code.
See: https://github.com/shelhamer/fcn.berkeleyvision.org/blob/master/surgery.py
"""
assert dim_in == dim_out
assert up_scale % 2 == 0, 'Scale should be even'
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return ((1 - abs(og[0] - center) / factor) *
(1 - abs(og[1] - center) / factor))
kernel_size = up_scale * 2
bil_filt = upsample_filt(kernel_size)
kernel = np.zeros(
(dim_in, dim_out, kernel_size, kernel_size), dtype=np.float32
)
kernel[range(dim_out), range(dim_in), :, :] = bil_filt
blob = self.ConvTranspose(
blob_in,
blob_out,
dim_in,
dim_out,
kernel_size,
stride=int(up_scale),
pad=int(up_scale / 2),
weight_init=('GivenTensorFill', {'values': kernel}),
bias_init=('ConstantFill', {'value': 0.})
)
self.do_not_update_params.append(self.weights[-1])
self.do_not_update_params.append(self.biases[-1])
return blob
def ConvAffine( # args in the same order of Conv()
self, blob_in, prefix, dim_in, dim_out, kernel, stride, pad,
group=1, dilation=1,
weight_init=None,
bias_init=None,
suffix='_bn',
inplace=False
):
"""ConvAffine adds a Conv op followed by a AffineChannel op (which
replaces BN during fine tuning).
"""
conv_blob = self.Conv(
blob_in,
prefix,
dim_in,
dim_out,
kernel,
stride=stride,
pad=pad,
group=group,
dilation=dilation,
weight_init=weight_init,
bias_init=bias_init,
no_bias=1
)
blob_out = self.AffineChannel(
conv_blob, prefix + suffix, dim=dim_out, inplace=inplace
)
return blob_out
def ConvGN( # args in the same order of Conv()
self, blob_in, prefix, dim_in, dim_out, kernel, stride, pad,
group_gn, # num of groups in gn
group=1, dilation=1,
weight_init=None,
bias_init=None,
suffix='_gn',
no_conv_bias=1,
):
"""ConvGN adds a Conv op followed by a GroupNorm op,
including learnable scale/bias (gamma/beta)
"""
conv_blob = self.Conv(
blob_in,
prefix,
dim_in,
dim_out,
kernel,
stride=stride,
pad=pad,
group=group,
dilation=dilation,
weight_init=weight_init,
bias_init=bias_init,
no_bias=no_conv_bias)
if group_gn < 1:
logger.warning(
'Layer: {} (dim {}): '
'group_gn < 1; reset to 1.'.format(prefix, dim_in)
)
group_gn = 1
blob_out = self.SpatialGN(
conv_blob, prefix + suffix,
dim_out, num_groups=group_gn,
epsilon=cfg.GROUP_NORM.EPSILON,)
self.gn_params.append(self.params[-1]) # add gn's bias to list
self.gn_params.append(self.params[-2]) # add gn's scale to list
return blob_out
def DisableCudnn(self):
self.prev_use_cudnn = self.use_cudnn
self.use_cudnn = False
def RestorePreviousUseCudnn(self):
prev_use_cudnn = self.use_cudnn
self.use_cudnn = self.prev_use_cudnn
self.prev_use_cudnn = prev_use_cudnn
def UpdateWorkspaceLr(self, cur_iter, new_lr):
"""Updates the model's current learning rate and the workspace (learning
rate and update history/momentum blobs).
"""
# The workspace is the one source of truth for the lr
# The lr is always the same on all GPUs
cur_lr = workspace.FetchBlob('gpu_0/lr')[0]
# There are no type conversions between the lr in Python and the lr in
        # the GPU (both are float32), so exact comparison is ok
if cur_lr != new_lr:
ratio = _get_lr_change_ratio(cur_lr, new_lr)
self._SetNewLr(cur_lr, new_lr)
return new_lr
def _SetNewLr(self, cur_lr, new_lr):
"""Do the actual work of updating the model and workspace blobs.
"""
for i in range(cfg.NUM_GPUS):
with c2_utils_CudaScope(i):
workspace.FeedBlob(
'gpu_{}/lr'.format(i), np.array([new_lr], dtype=np.float32))
ratio = _get_lr_change_ratio(cur_lr, new_lr)
if cfg.SOLVER.SCALE_MOMENTUM and cur_lr > 1e-7 and \
ratio > cfg.SOLVER.SCALE_MOMENTUM_THRESHOLD:
self._CorrectMomentum(new_lr / cur_lr)
def _CorrectMomentum(self, correction):
"""The MomentumSGDUpdate op implements the update V as
V := mu * V + lr * grad,
where mu is the momentum factor, lr is the learning rate, and grad is
the stochastic gradient. Since V is not defined independently of the
learning rate (as it should ideally be), when the learning rate is
changed we should scale the update history V in order to make it
compatible in scale with lr * grad.
"""
for i in range(cfg.NUM_GPUS):
with c2_utils_CudaScope(i):
for param in self.TrainableParams(gpu_id=i):
op = core.CreateOperator(
'Scale', [param + '_momentum'], [param + '_momentum'],
scale=correction)
workspace.RunOperatorOnce(op)
def GetLossScale(self):
"""Allow a way to configure the loss scale dynamically.
This may be used in a distributed data parallel setting.
"""
return 1.0 / cfg.NUM_GPUS
def AddLosses(self, losses):
if not isinstance(losses, list):
losses = [losses]
# Conversion to str allows losses to include BlobReferences
losses = [c2_utils_UnscopeName(str(l)) for l in losses]
self.losses = list(set(self.losses + losses))
def AddMetrics(self, metrics):
if not isinstance(metrics, list):
metrics = [metrics]
self.metrics = list(set(self.metrics + metrics))
class Timer(object):
"""A simple timer."""
def __init__(self):
self.reset()
def tic(self):
        # using time.time instead of time.clock because time.clock
# does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
class AttrDict(dict):
IMMUTABLE = '__immutable__'
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__[AttrDict.IMMUTABLE] = False
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
elif name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if not self.__dict__[AttrDict.IMMUTABLE]:
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
else:
raise AttributeError(
'Attempted to set "{}" to "{}", but AttrDict is immutable'.
format(name, value)
)
def immutable(self, is_immutable):
"""Set immutability to is_immutable and recursively apply the setting
to all nested AttrDicts.
"""
self.__dict__[AttrDict.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
for v in self.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
def is_immutable(self):
return self.__dict__[AttrDict.IMMUTABLE]
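# Usage sketch for AttrDict (added for clarity; names and values are
# illustrative):
#   cfg_example = AttrDict({'TRAIN': AttrDict({'LR': 0.01})})
#   cfg_example.TRAIN.LR        # -> 0.01 (attribute access falls back to keys)
#   cfg_example.immutable(True)
#   cfg_example.TRAIN.LR = 0.1  # raises AttributeError while immutable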
c2_utils_import_detectron_ops()
cv2.ocl.setUseOpenCL(False)
DP = DensePoseMethods()
def main(args_im_or_folder, args_cfg, args_output_dir, args_image_ext, args_weights):
logger = logging.getLogger(__name__)
merge_cfg_from_file(args_cfg)
cfg.NUM_GPUS = 1
args_weights = cache_url(args_weights, cfg.DOWNLOAD_CACHE)
assert_and_infer_cfg(cache_urls=False)
model = infer_engine_initialize_model_from_cfg(args_weights)
dummy_coco_dataset = dummy_datasets_get_coco_dataset()
if os.path.isdir(args_im_or_folder):
im_list = glob.iglob(args_im_or_folder + '/*.' + args_image_ext)
else:
im_list = [args_im_or_folder]
for i, im_name in enumerate(im_list):
out_name = os.path.join(
args_output_dir, '{}'.format(os.path.basename(im_name) + '.pdf')
)
#logger.info('Processing {} -> {}'.format(im_name, out_name))
im = cv2.imread(im_name)
timers = defaultdict(Timer)
t = time.time()
with c2_utils_NamedCudaScope(0):
cls_boxes, cls_segms, cls_keyps, cls_bodys = infer_engine_im_detect_all(
model, im, None, timers=timers
)
vis_utils_vis_one_image(
im[:, :, ::-1], # BGR -> RGB for visualization
im_name,
args_output_dir,
cls_boxes,
cls_segms,
cls_keyps,
cls_bodys,
dataset=dummy_coco_dataset,
box_alpha=0.3,
show_class=True,
thresh=0.7,
kp_thresh=2
)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
main('data/', 'assets/config.yaml',
'output/', 'jpg',
'https://dl.fbaipublicfiles.com/densepose/DensePose_ResNet101_FPN_s1x-e2e.pkl')
|
py
|
1a5b0f9a48e02cbb1e03f049663aab8631cad56c
|
class FilterModule(object):
    '''Return a dict mapping each source key to the override value whose key matches that item's selector property.'''
def filters(self):
return {
'dict_override': dict_override
}
def dict_override(source, overrides, selector='Type'):
return {pk:v for k,v in overrides.items() for pk, pv in source.items() if pv.get(selector) == k}
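# Usage sketch (added for clarity; data is illustrative):
#   source = {'web1': {'Type': 'web', 'port': 80}, 'db1': {'Type': 'db'}}
#   overrides = {'web': {'port': 8080}}
#   dict_override(source, overrides)  # -> {'web1': {'port': 8080}}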
|
py
|
1a5b0fb907237c60454a2989b7cd1efb742fe2d6
|
import komand
from .schema import UpdateSiteIncludedTargetsInput, UpdateSiteIncludedTargetsOutput, Input
# Custom imports below
from komand_rapid7_insightvm.util import endpoints
from komand_rapid7_insightvm.util.resource_requests import ResourceRequests
import json
class UpdateSiteIncludedTargets(komand.Action):
def __init__(self):
super(self.__class__, self).__init__(
name='update_site_included_targets',
description='Update an existing site scope of included ip address and hostname targets',
input=UpdateSiteIncludedTargetsInput(),
output=UpdateSiteIncludedTargetsOutput())
def run(self, params={}):
scope = params.get(Input.INCLUDED_TARGETS)
resource_helper = ResourceRequests(self.connection.session, self.logger)
endpoint = endpoints.Site.site_included_targets(self.connection.console_url, params.get(Input.ID))
# Pull current site scope in order to append to list instead of overwriting
if not params.get(Input.OVERWRITE):
current_scope = resource_helper.resource_request(endpoint=endpoint,
method='get')
self.logger.info(f"Appending to current list of included targets")
scope.extend(current_scope['addresses'])
self.logger.info(f"Using {endpoint} ...")
payload = {"rawbody": scope}
response = resource_helper.resource_request(endpoint=endpoint,
method='put',
payload=payload)
return {
"id": params.get(Input.ID),
"links": response['links']
}
|
py
|
1a5b117bdc2bf5168d950a9e580f1464ae87ed22
|
# import MySQLdb as msq
# import MySQLdb.cursors
#
#
# class BaseDAO:
# """
# Base data access object.
# Inherited by all classes that need to access the mysql database. For data analysis tasks, use pandas and sql alchemy
#
# Attributes:
# db: The mysql connection
# dbc: The mysql cursor
# cnt: Count of rows affected (default None)
# query: Query string to run
#         val: Array holding the values to insert into the query
#         vals: Array alias for val
#         results: List of dictionaries holding results of the query
# status: String of the format 'connected to: %s' % databaseName
# test: DEPRECATED legacy string
# local: DEPRECATED legacy string
# """
#
# def __init__(self, test=False, local=False):
# """
# Args:
# test: No longer used; still here for legacy code
# local: No longer used; still here for legacy code
# """
# # self.credentials = credentials
# self.test = test
# self.local = local
# self.mysqlError = MySQLdb.Error
# self.cnt = None
# self.query = ""
# self.val = []
# self.vals = []
# self.results = []
# self.status = "Not connected"
#
# def connect(self, credentials):
# """
# Loads in a credential object for connecting to db
#
# Arguments:
# credentials: SQL_Credentials.CredentialLoader
# """
# try:
# #dsn = credentials.host() + ':' + credentials.port()
# if credentials.port() is not None:
# self.db = msq.connect(host=credentials.host(), port=credentials.port(),
# user=credentials.username(),
# passwd=credentials.password(),
# db=credentials.database(),
# charset='utf8',
# use_unicode=True,
# cursorclass=MySQLdb.cursors.DictCursor)
# else:
# self.db = msq.connect(host=credentials.host(),
# user=credentials.username(),
# passwd=credentials.password(),
# db=credentials.database(),
# charset='utf8',
# use_unicode=True,
# cursorclass=MySQLdb.cursors.DictCursor)
#
# self.db.autocommit(True)
# self.dbc = self.db.cursor()
# self.status = 'Connected to: %s' % credentials.database()
# except MySQLdb.Error as e:
# print(("Connection error : %s" % e))
# raise
#
# def executeQuery(self):
# """
#         Prepares and executes the query stored in self.query with the variables in self.val
# Usually used for insert, update, and other functions which don't require a return
# """
# try:
# # self.checkValName()
# self.dbc.execute(self.query, self.val)
# self.cnt = self.dbc.rowcount
# except MySQLdb.Error as e:
# print(("Query failed: %s" % e))
#
# def returnOne(self):
# """
#         Executes the query stored in self.query with the vals in self.val.
# Returns the first row in an array called self.results
# """
# try:
# # self.checkValName()
# self.dbc.execute(self.query, self.val)
# self.results = self.dbc.fetchone()
#
# except MySQLdb.Error as e:
# print(("Query failed: %s " % e))
#
# # raise
#
# def returnAll(self):
# """
#         Executes the query stored in self.query with the vals in self.val.
# Return the results in an array called self.results
# """
# try:
# # self.checkValName()
# self.dbc.execute(self.query, self.val)
# self.results = self.dbc.fetchall()
# except MySQLdb.Error as e:
# print(("Query failed: %s " % e))
# raise
#
# def checkValName(self):
# """
#         Since I sometimes may use self.val and other times use self.vals, this will check which is used and proceed appropriately
# """
# valLength = len(self.val)
# if valLength == 0:
# try:
#                 valsLength = len(self.vals)
# if valsLength == 0:
# self.val = self.vals
# except Exception:
# print("No value set")
#
# def returnInsertID(self):
# """
# Returns the id of the last insert statement. Also sets self.insertedid with this value
# """
# try:
# return self.db.insert_id()
# except MySQLdb.Error as e:
# print("Error getting insert id %s " % e)
|
py
|
1a5b11f9194fbea998eb20d7054b973d8d1ac664
|
"""Subpackage ``plot_profile.plot_icon``."""
# Standard library
from typing import List
__author__ = """Michel Zeller"""
__email__ = "[email protected]"
__version__ = "0.1.1"
|
py
|
1a5b1285a56976ba6a9de12b4b66b53bbdc0b2f4
|
from .file_util import *
from .print_util import *
from .safe_convert import *
from .img_util import *
from .img_comp_split import *
from .calc_iou import *
from .logger_util import *
from .mutiprocessing_util import *
from .npy_loader import *
__all__ = [
"list_sub_dirs",
"flatten_dir_path",
"create_dir",
"safe2int",
"safe2float",
"TblPrinter",
"MutiProcessor",
"get_logger",
"gray2RGB",
"convertGray2RGB",
"convertGray2RGB_Muti",
"convertRGB2Gray",
"convertRGB2Gray_Muti",
"calc_mean_iou",
"img_compose",
"load_npy",
]
|
py
|
1a5b12b82a6427d94f1a7144ac35c36193421822
|
#!/usr/bin/env python3
import os
import sys
import urllib.request
import tarfile
import zipfile
import shutil
from typing import List, Optional
PLATFORM_WINDOWS = "windows"
PLATFORM_LINUX = "linux"
PLATFORM_MACOS = "mac"
DOTNET_RUNTIME_VERSION = "6.0.0"
DOTNET_RUNTIME_DOWNLOADS = {
PLATFORM_LINUX: "https://download.visualstudio.microsoft.com/download/pr/0ce1c34f-0d9e-4d9b-964e-da676c8e605a/7a6c353b36477fa84f85b2821f2350c2/dotnet-runtime-6.0.0-linux-x64.tar.gz",
PLATFORM_WINDOWS: "https://download.visualstudio.microsoft.com/download/pr/6b96c97d-9b8c-4141-a32a-5848d3369dbf/9972321cb7af5938fecdee2d8ebd72bb/dotnet-runtime-6.0.0-win-x64.zip",
PLATFORM_MACOS: "https://download.visualstudio.microsoft.com/download/pr/d88f74a5-05d2-46cb-886a-a62fd698009d/67f5f05e9c029d284c309f0f712fc99f/dotnet-runtime-6.0.0-osx-x64.tar.gz"
}
p = os.path.join
def main() -> None:
update_netcore_runtime(sys.argv[1:])
def update_netcore_runtime(platforms: List[str]) -> None:
runtime_cache = p("Dependencies/dotnet")
version_file_path = p(runtime_cache, "VERSION")
# Check if current version is fine.
current_version: Optional[str]
try:
with open(version_file_path, "r") as f:
current_version = f.read().strip()
except FileNotFoundError:
current_version = None
if current_version != DOTNET_RUNTIME_VERSION and os.path.exists(runtime_cache):
print("Cached Release .NET Core Runtime out of date/nonexistant, downloading new one..")
shutil.rmtree(runtime_cache)
os.makedirs(runtime_cache, exist_ok=True)
with open(version_file_path, "w") as f:
f.write(DOTNET_RUNTIME_VERSION)
# Download missing runtimes if necessary.
for platform in platforms:
platform_runtime_cache = p(runtime_cache, platform)
if not os.path.exists(platform_runtime_cache):
os.mkdir(platform_runtime_cache)
download_platform_runtime(platform_runtime_cache, platform)
def download_platform_runtime(dir: str, platform: str) -> None:
print(f"Downloading .NET Core Runtime for platform {platform}.")
download_file = p(dir, "download.tmp")
download_url = DOTNET_RUNTIME_DOWNLOADS[platform]
urllib.request.urlretrieve(download_url, download_file)
if download_url.endswith(".tar.gz"):
# this is a tar gz.
with tarfile.open(download_file, "r:gz") as tar:
tar.extractall(dir)
elif download_url.endswith(".zip"):
with zipfile.ZipFile(download_file) as zipF:
zipF.extractall(dir)
os.remove(download_file)
if __name__ == "__main__":
main()
|
py
|
1a5b12fa6d6cf43211d658cec87d2abc9b888f04
|
import yaml
import json
from os import listdir
from os.path import isfile, join
"""
{ name, kingdom, imageUrl}
"""
path = "./data/raw/image-url.yml"
stream = open(path, "r")
data = yaml.load_all(stream, yaml.Loader)
data_dicts = [
{
"name": datum["name"].lower(),
"kingdom": datum["kingdom"],
"imageUrl": datum["imageUrl"],
}
for datum in data
]
json_data = {"data": data_dicts}
with open("./data/generated/json/image-urls.json", "w") as fout:
json_dumps_str = json.dumps(json_data, indent=4)
print(json_dumps_str, file=fout)
|
py
|
1a5b132698f576a095386d154d0dba512d4482ed
|
from PyQt5 import QtGui, QtCore, QtWidgets
from PyQt5.QtWidgets import *
from tools.modeltool import *
from tools.tool import *
from tools.pathtool import *
from tools.milltask import *
from guifw.gui_elements import *
import sys, os, os.path
from solids import *
from objectviewer import *
class ModelDialog(QtWidgets.QWidget):
def __init__(self, viewer):
        QtWidgets.QWidget.__init__(self)
        mlayout = QtWidgets.QGridLayout()
        self.setLayout(mlayout)
        loadbutton = QtWidgets.QPushButton("Load")
loadbutton.clicked.connect(self.showDialog)
mlayout.addWidget(loadbutton, 0, 0)
self.modelTool = ModelTool(name="Model", object=None, viewUpdater=self.updateView)
self.toolWidget = ToolPropertyWidget(parent=self, tool=self.modelTool)
mlayout.addWidget(self.toolWidget, 1, 0)
self.viewer = viewer
self.object = Solid()
if len(sys.argv) > 1:
self.loadObject(sys.argv[1])
def updateView(self, mode='mesh'):
if mode == 'mesh':
self.viewer.showFacets(self.modelTool.object)
if mode == 'heightmap':
self.viewer.showHeightMap(self.modelTool.object)
if mode == 'slice':
self.viewer.showFacets(self.modelTool.object)
def showDialog(self):
        filename = QtWidgets.QFileDialog.getOpenFileName(self, 'Open file', '', "STL files (*.stl)")[0]
self.loadObject(filename)
def loadObject(self, filename):
if not os.path.isfile(filename):
return
self.object = Solid()
self.object.load(filename)
self.object.__class__ = CAM_Solid
self.modelTool.object = self.object
self.updateView()
|
py
|
1a5b13746724ba3a443325da744bcf29b5e046ca
|
#!/usr/bin/env python3
import os
import sys
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
filename = sys.argv[1]
# Check restart data v. original data
sys.path.insert(0, '../../../../warpx/Examples/')
from analysis_default_restart import check_restart
check_restart(filename)
# Check-sum analysis
filename = sys.argv[1]
test_name = os.path.split(os.getcwd())[1]
checksumAPI.evaluate_checksum(test_name, filename)
|
py
|
1a5b13aed75d680b95ae73dcc2f29eb862168480
|
_base_ = './bisenetv1_r18_1024x1024_80k_cityscapes.py'
model = dict(
pretrained='torchvision://resnet18',
backbone=dict(base_model='ResNet')
)
|
py
|
1a5b142858db686acb00322fc18e8791f4a034ce
|
# model settings
temperature = 0.01
with_norm = True
query_dim = 128
model = dict(
type='UVCNeckMoCoTrackerV2',
queue_dim=query_dim,
patch_queue_size=256 * 144 * 5,
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(0, 1, 2, 3),
# strides=(1, 2, 1, 1),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
neck=dict(
type='FPN',
in_channels=[64, 128, 256, 512],
out_channels=256,
norm_cfg=dict(type='SyncBN', requires_grad=True),
num_outs=4,
out_index=1),
cls_head=dict(
type='UVCHead',
loss_feat=None,
loss_aff=dict(
type='ConcentrateLoss',
win_len=8,
stride=8,
temperature=temperature,
with_norm=with_norm,
loss_weight=1.),
loss_bbox=dict(type='L1Loss', loss_weight=10.),
in_channels=256,
channels=128,
temperature=temperature,
with_norm=with_norm,
init_std=0.01,
track_type='coord'),
patch_head=dict(
type='MoCoHead',
loss_feat=dict(type='MultiPairNCE', loss_weight=1.),
in_channels=512,
# num_convs=2,
# kernel_size=3,
# norm_cfg=dict(type='BN'),
# act_cfg=dict(type='ReLU'),
channels=query_dim,
temperature=0.2,
with_norm=with_norm))
# model training and testing settings
train_cfg = dict(
patch_size=96,
img_as_ref=True,
img_as_tar=False,
img_as_embed=True,
patch_geo_aug=True,
patch_color_aug=True,
diff_crop=True,
skip_cycle=True,
center_ratio=0.,
shuffle_bn=True)
test_cfg = dict(
precede_frames=7,
topk=5,
temperature=temperature,
# strides=(1, 2, 1, 1),
out_indices=(0, ),
neighbor_range=40,
with_norm=with_norm,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=2, frame_interval=8, num_clips=1),
dict(type='DuplicateFrames', times=2),
dict(type='DecordDecode'),
# dict(type='Resize', scale=(-1, 256)),
# dict(type='RandomResizedCrop', area_range=(0.2, 1.)),
dict(type='Resize', scale=(256, 256), keep_ratio=False),
dict(type='Flip', flip_ratio=0.5),
# dict(
# type='ColorJitter',
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.1,
# p=0.8,
# same_across_clip=False),
# dict(type='RandomGrayScale', p=0.2, same_across_clip=False),
# dict(type='RandomGaussianBlur', p=0.5, same_across_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=48,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=1e-1, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 50
checkpoint_config = dict(interval=1)
evaluation = dict(
interval=1, metrics='davis', key_indicator='J&F-Mean', rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['uvc-fpn-moco2'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
|
py
|
1a5b14a3b686a1ac5b841e3a39641ef6b6255b35
|
# container-service-extension
# Copyright (c) 2017 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
"""Basic utility methods to perform data transformation and file operations."""
import hashlib
import os
import pathlib
import platform
import stat
import sys
from typing import List
import urllib
import click
import pkg_resources
from pyvcloud.vcd.vcd_api_version import VCDApiVersion
import requests
import semantic_version
from container_service_extension.logging.logger import NULL_LOGGER
# chunk size in bytes for file reading
BUF_SIZE = 65536
# chunk size for downloading files
SIZE_1MB = 1024 * 1024
_type_to_string = {
str: 'string',
int: 'number',
bool: 'true/false',
dict: 'mapping',
list: 'sequence',
}
class NullPrinter:
"""Callback object which does nothing."""
def general_no_color(self, msg):
pass
def general(self, msg):
pass
def info(self, msg):
pass
def error(self, msg):
pass
class ConsoleMessagePrinter(NullPrinter):
"""Callback object to print color coded message on console."""
def general_no_color(self, msg):
click.secho(msg)
def general(self, msg):
click.secho(msg, fg='green')
def info(self, msg):
click.secho(msg, fg='yellow')
def error(self, msg):
click.secho(msg, fg='red')
def get_cse_version():
return pkg_resources.require('container-service-extension')[0].version
def get_cse_info():
return {
'product': 'CSE',
'description': 'Container Service Extension for VMware vCloud Director', # noqa: E501
'version': get_cse_version(),
'python': platform.python_version()
}
def get_installed_cse_version() -> semantic_version.Version:
"""."""
cse_version_raw = get_cse_info()['version']
# Cleanup version string. Strip dev version string segment.
# e.g. convert '2.6.0.0b2.dev5' to '2.6.0'
tokens = cse_version_raw.split('.')[:3]
return semantic_version.Version('.'.join(tokens))
def prompt_text(text, color='black', hide_input=False, type=str):
click_text = click.style(str(text), fg=color)
return click.prompt(click_text, hide_input=hide_input, type=type)
def is_environment_variable_enabled(env_var_name):
"""Check if the environment variable is set.
:param str env_var_name: Name of the environment variable
:rtype: bool
"""
return str_to_bool(os.getenv(env_var_name))
def get_duplicate_items_in_list(items):
"""Find duplicate entries in a list.
:param list items: list of items with possible duplicates.
:return: the items that occur more than once in input list. Each duplicated
item will be mentioned only once in the returned list.
:rtype: list
"""
seen = set()
duplicates = set()
if items:
for item in items:
if item in seen:
duplicates.add(item)
else:
seen.add(item)
return list(duplicates)
def check_keys_and_value_types(dikt, ref_dict, location='dictionary',
excluded_keys=None,
msg_update_callback=NullPrinter()):
"""Compare a dictionary with a reference dictionary.
The method ensures that all keys and value types are the same in the
dictionaries.
:param dict dikt: the dictionary to check for validity
:param dict ref_dict: the dictionary to check against
:param str location: where this check is taking place, so error messages
can be more descriptive.
    :param list excluded_keys: list of str, representing the keys which, if
        missing, won't raise an exception.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
:raises KeyError: if @dikt has missing or invalid keys
:raises TypeError: if the value of a property in @dikt does not match with
the value of the same property in @ref_dict
"""
if excluded_keys is None:
excluded_keys = []
ref_keys = set(ref_dict.keys())
keys = set(dikt.keys())
missing_keys = ref_keys - keys - set(excluded_keys)
if missing_keys:
msg_update_callback.error(
f"Missing keys in {location}: {missing_keys}")
bad_value = False
for k in ref_keys:
if k not in keys:
continue
value_type = type(ref_dict[k])
if not isinstance(dikt[k], value_type):
msg_update_callback.error(
f"{location} key '{k}': value type should be "
f"'{_type_to_string[value_type]}'")
bad_value = True
if missing_keys:
raise KeyError(f"Missing and/or invalid key in {location}")
if bad_value:
raise TypeError(f"Incorrect type for property value(s) in {location}")
def check_python_version(msg_update_callback=NullPrinter()):
"""Ensure that user's Python version >= 3.7.3.
If the check fails, will exit the python interpreter with error status.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
"""
try:
msg_update_callback.general_no_color(
"Required Python version: >= 3.7.3\n"
f"Installed Python version: {sys.version}")
if sys.version_info < (3, 7, 3):
raise Exception("Python version should be 3.7.3 or greater")
except Exception as err:
msg_update_callback.error(str(err))
sys.exit(1)
def str_to_bool(s):
"""Convert string boolean values to bool.
The conversion is case insensitive.
:param s: input string
    :return: True if s is one of 'true', 'yes' or 'y', otherwise False
"""
return str(s).lower() in ('true', 'yes', 'y')
def get_sha256(filepath):
"""Get sha256 hash of file as a string.
:param str filepath: path to file.
:return: sha256 string for the file.
:rtype: str
"""
sha256 = hashlib.sha256()
with open(filepath, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
sha256.update(data)
return sha256.hexdigest()
def check_file_permissions(filename, msg_update_callback=NullPrinter()):
"""Ensure that the file has correct permissions.
Unix based system:
Owner - r/w permission
Other - No access
Windows:
No check
:param str filename: path to file.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
    :raises IOError: if file has 'x' permissions for Owner or 'rwx'
permissions for 'Others' or 'Group'.
"""
if os.name == 'nt':
return
err_msgs = []
file_mode = os.stat(filename).st_mode
if file_mode & stat.S_IXUSR:
msg = f"Remove execute permission of the Owner for the file {filename}"
msg_update_callback.error(msg)
err_msgs.append(msg)
if file_mode & stat.S_IROTH or file_mode & stat.S_IWOTH \
or file_mode & stat.S_IXOTH:
msg = f"Remove read, write and execute permissions of Others for " \
f"the file {filename}"
msg_update_callback.error(msg)
err_msgs.append(msg)
if file_mode & stat.S_IRGRP or file_mode & stat.S_IWGRP \
or file_mode & stat.S_IXGRP:
msg = f"Remove read, write and execute permissions of Group for the " \
f"file {filename}"
msg_update_callback.error(msg)
err_msgs.append(msg)
if err_msgs:
raise IOError(err_msgs)
def download_file(url, filepath, sha256=None, force_overwrite=False,
logger=NULL_LOGGER, msg_update_callback=NullPrinter()):
"""Download a file from a url to local filepath.
Will not overwrite files unless @sha256 is given.
Recursively creates specified directories in @filepath.
:param str url: source url.
:param str filepath: destination filepath.
    :param str sha256: expected sha256 of the file. If omitted, an existing
        file at @filepath always skips the download; if given, the download
        is skipped only when the existing file's sha256 matches.
:param bool force_overwrite: if True, will download the file even if it
already exists or its SHA hasn't changed.
:param logging.Logger logger: logger to log with.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
:raises HTTPError: if the response has an error status code
"""
path = pathlib.Path(filepath)
if not force_overwrite and path.is_file() and \
(sha256 is None or get_sha256(filepath) == sha256):
msg = f"Skipping download to '{filepath}' (file already exists)"
logger.info(msg)
msg_update_callback.general(msg)
return
path.parent.mkdir(parents=True, exist_ok=True)
msg = f"Downloading file from '{url}' to '{filepath}'..."
logger.info(msg)
msg_update_callback.info(msg)
response = requests.get(url, stream=True,
headers={'Cache-Control': 'no-cache'})
response.raise_for_status()
with path.open(mode='wb') as f:
for chunk in response.iter_content(chunk_size=SIZE_1MB):
f.write(chunk)
msg = "Download complete"
logger.info(msg)
msg_update_callback.general(msg)
def read_data_file(filepath, logger=NULL_LOGGER,
msg_update_callback=NullPrinter()):
"""Retrieve file content from local disk as a string.
:param str filepath: absolute filepath of the file, whose content we want
to read.
:param logging.Logger logger: logger to log with.
:param utils.ConsoleMessagePrinter msg_update_callback: Callback object.
:return: the contents of the file.
:rtype: str
:raises FileNotFoundError: if requested data file cannot be
found.
"""
path = pathlib.Path(filepath)
try:
contents = path.read_text()
except FileNotFoundError as err:
msg_update_callback.error(f"{err}")
logger.error(f"{err}", exc_info=True)
raise
msg = f"Found data file: {path}"
msg_update_callback.general(msg)
logger.debug(msg)
return contents
def flatten_dictionary(input_dict, parent_key='', separator='.'):
"""Flatten a given dictionary with nested dictionaries if any.
Example: { 'a' : {'b':'c', 'd': {'e' : 'f'}}, 'g' : 'h'} will be flattened
to {'a.b': 'c', 'a.d.e': 'f', 'g': 'h'}
This will flatten only the values of dict type.
:param dict input_dict:
:param str parent_key: parent key that gets prefixed while forming flattened key # noqa: E501
:param str separator: use the separator to form flattened key
:return: flattened dictionary
:rtype: dict
"""
flattened_dict = {}
for k in input_dict.keys():
val = input_dict.get(k)
key_prefix = f"{parent_key}{k}"
if isinstance(val, dict):
flattened_dict.update(flatten_dictionary(val, f"{key_prefix}{separator}")) # noqa: E501
else:
flattened_dict.update({key_prefix: val})
return flattened_dict
def escape_query_filter_expression_value(value):
value_str = str(value)
value_str = value_str.replace('(', "\\(")
value_str = value_str.replace(')', "\\)")
value_str = value_str.replace(';', "\\;")
value_str = value_str.replace(',', "\\,")
return value_str
def construct_filter_string(filters: dict):
"""Construct &-ed filter string from the dict.
:param dict filters: dictionary containing key and values for the filters
"""
filter_string = ""
if filters:
filter_expressions = []
for (key, value) in filters.items():
if key and value:
filter_exp = f"{key}=={urllib.parse.quote(escape_query_filter_expression_value(value))}" # noqa: E501
filter_expressions.append(filter_exp)
filter_string = ";".join(filter_expressions)
return filter_string
def extract_id_from_href(href):
"""Extract id from an href.
'https://vmware.com/api/admin/user/123456' will return 123456
:param str href: an href
:return: id
"""
if not href:
return None
if '/' in href:
return href.split('/')[-1]
return href
# ToDo: Devise a better way to find the max api version
# without converting the strings to float.
# e.g. 5.20 will be smaller than 5.8 if compared as float, which is wrong
def get_max_api_version(api_versions: List[str]) -> str:
return str(max(VCDApiVersion(x) for x in api_versions))
|
py
|
1a5b154c6ff2f13a479eeaf765fb44456cb7ebb9
|
# Silvius microphone client based on Tanel's client.py
__author__ = 'dwk'
import argparse
from ws4py.client.threadedclient import WebSocketClient
import threading
import sys
import urllib
import json
reconnect_mode = False
fatal_error = False
class MyClient(WebSocketClient):
def __init__(self, url, mic=1, protocols=None, extensions=None, heartbeat_freq=None, byterate=16000,
show_hypotheses=True,
save_adaptation_state_filename=None, send_adaptation_state_filename=None, audio_gate=0):
super(MyClient, self).__init__(url, protocols, extensions, heartbeat_freq)
self.mic = mic
self.show_hypotheses = show_hypotheses
self.byterate = byterate
self.save_adaptation_state_filename = save_adaptation_state_filename
self.send_adaptation_state_filename = send_adaptation_state_filename
self.chunk = 0
self.audio_gate = audio_gate
def send_data(self, data):
self.send(data, binary=True)
def opened(self):
import pyaudio
import audioop
pa = pyaudio.PyAudio()
sample_rate = self.byterate
stream = None
while stream is None:
try:
# try adjusting this if you want fewer network packets
self.chunk = 2048 * 2 * sample_rate / self.byterate
mic = self.mic
if mic == -1:
mic = pa.get_default_input_device_info()['index']
print >> sys.stderr, "Selecting default mic"
print >> sys.stderr, "Using mic #", mic
stream = pa.open(
rate = sample_rate,
format = pyaudio.paInt16,
channels = 1,
input = True,
input_device_index = mic,
frames_per_buffer = self.chunk)
except IOError, e:
if(e.errno == -9997 or e.errno == 'Invalid sample rate'):
new_sample_rate = int(pa.get_device_info_by_index(mic)['defaultSampleRate'])
if(sample_rate != new_sample_rate):
sample_rate = new_sample_rate
continue
print >> sys.stderr, "\n", e
print >> sys.stderr, "\nCould not open microphone. Please try a different device."
global fatal_error
fatal_error = True
sys.exit(0)
def mic_to_ws(): # uses stream
try:
print >> sys.stderr, "\nLISTENING TO MICROPHONE"
last_state = None
while True:
data = stream.read(self.chunk)
if self.audio_gate > 0:
rms = audioop.rms(data, 2)
if rms < self.audio_gate:
data = '\00' * len(data)
#if sample_chan == 2:
# data = audioop.tomono(data, 2, 1, 1)
if sample_rate != self.byterate:
(data, last_state) = audioop.ratecv(data, 2, 1, sample_rate, self.byterate, last_state)
self.send_data(data)
except IOError, e:
# usually a broken pipe
print e
except AttributeError:
# currently raised when the socket gets closed by main thread
pass
# to voluntarily close the connection, we would use
#self.send_data("")
#self.send("EOS")
try:
self.close()
except IOError:
pass
threading.Thread(target=mic_to_ws).start()
def received_message(self, m):
response = json.loads(str(m))
#print >> sys.stderr, "RESPONSE:", response
#print >> sys.stderr, "JSON was:", m
if response['status'] == 0:
if 'result' in response:
trans = response['result']['hypotheses'][0]['transcript']
if response['result']['final']:
if self.show_hypotheses:
print >> sys.stderr, '\r%s' % trans.replace("\n", "\\n")
print '%s' % trans.replace("\n", "\\n") # final result!
sys.stdout.flush()
elif self.show_hypotheses:
print_trans = trans.replace("\n", "\\n")
if len(print_trans) > 80:
print_trans = "... %s" % print_trans[-76:]
print >> sys.stderr, '\r%s' % print_trans,
if 'adaptation_state' in response:
if self.save_adaptation_state_filename:
print >> sys.stderr, "Saving adaptation state to %s" % self.save_adaptation_state_filename
with open(self.save_adaptation_state_filename, "w") as f:
f.write(json.dumps(response['adaptation_state']))
else:
print >> sys.stderr, "Received error from server (status %d)" % response['status']
if 'message' in response:
print >> sys.stderr, "Error message:", response['message']
global reconnect_mode
if reconnect_mode:
import time
print >> sys.stderr, "Sleeping for five seconds before reconnecting"
time.sleep(5)
def closed(self, code, reason=None):
#print "Websocket closed() called"
#print >> sys.stderr
pass
def setup():
content_type = "audio/x-raw, layout=(string)interleaved, rate=(int)16000, format=(string)S16LE, channels=(int)1"
path = 'client/ws/speech'
parser = argparse.ArgumentParser(description='Microphone client for silvius')
parser.add_argument('-s', '--server', default="localhost", dest="server", help="Speech-recognition server")
parser.add_argument('-p', '--port', default="8019", dest="port", help="Server port")
#parser.add_argument('-r', '--rate', default=16000, dest="rate", type=int, help="Rate in bytes/sec at which audio should be sent to the server.")
parser.add_argument('-d', '--device', default="-1", dest="device", type=int, help="Select a different microphone (give device ID)")
parser.add_argument('-k', '--keep-going', action="store_true", help="Keep reconnecting to the server after periods of silence")
parser.add_argument('--save-adaptation-state', help="Save adaptation state to file")
parser.add_argument('--send-adaptation-state', help="Send adaptation state from file")
parser.add_argument('--content-type', default=content_type, help="Use the specified content type (default is " + content_type + ")")
parser.add_argument('--hypotheses', default=True, type=int, help="Show partial recognition hypotheses (default: 1)")
parser.add_argument('-g', '--audio-gate', default=0, type=int, help="Audio-gate level to reduce detections when not talking")
args = parser.parse_args()
content_type = args.content_type
print >> sys.stderr, "Content-Type:", content_type
if(args.keep_going):
global reconnect_mode
global fatal_error
reconnect_mode = True
while(fatal_error == False):
print >> sys.stderr, "Reconnecting..."
run(args, content_type, path)
else:
run(args, content_type, path)
def run(args, content_type, path):
uri = "ws://%s:%s/%s?%s" % (args.server, args.port, path, urllib.urlencode([("content-type", content_type)]))
print >> sys.stderr, "Connecting to", uri
ws = MyClient(uri, byterate=16000, mic=args.device, show_hypotheses=args.hypotheses,
save_adaptation_state_filename=args.save_adaptation_state, send_adaptation_state_filename=args.send_adaptation_state, audio_gate=args.audio_gate)
ws.connect()
#result = ws.get_full_hyp()
#print result.encode('utf-8')
ws.run_forever()
def main():
try:
setup()
except KeyboardInterrupt:
print >> sys.stderr, "\nexiting..."
if __name__ == "__main__":
main()
|
py
|
1a5b155241df44baf63b533e1a79bc07cd38077a
|
import pandas as pd
# Lists are enclosed in brackets:
# l = [1, 2, "a"]
# Tuples are enclosed in parentheses:
# Tuples are faster and consume less memory
# t = (1, 2, "a")
# Dictionaries are built with curly brackets:
# d = {"a":1, "b":2}
# Sets are made using the set() builtin function
# Python List vs. Tuples (Key points to remember)
# The literal syntax of tuples is shown by parentheses ()
# whereas the literal syntax of lists is shown by square brackets []
# Lists has variable length, tuple has fixed length.
# List has mutable nature, tuple has immutable nature.
# List has more functionality than the tuple.
# Basics of creating Pandas DataFrames from Lists and Dictionaries
# http://pbpython.com/pandas-list-dict.html
# https://www.datacamp.com/community/tutorials/pandas-read-csv
def csv_2sql( csv_file_name, table_name ):
data = pd.read_csv( csv_file_name )
# Get the first 5 rows
# print( data.head() )
rows, c_count = data.shape
print( "# Number of rows={} and columns={}".format(rows, c_count ) )
p = ' '
print( "sql = '''")
print( "CREATE TABLE {} ( ".format(table_name) )
i = 0
for col in data.columns:
i = i+1
t = data[col].dtype
if t == 'int64':
t = "INTEGER"
else:
t = "LVARCHAR"
if i == c_count:
print( p, col, t, " ); " )
else :
print( p, col, t, "," )
print( "'''")
print( )
print( "sql = '''")
print( "INSERT INTO {} ( ".format(table_name) )
i = 0
for col in data.columns:
i = i+1
if i == c_count:
print( p, col, " ) " )
else :
print( p, col, "," )
# Python 3 specific ( end = ''), to print on same line
print( p, "VALUES ( ", end = '' )
i = 0
while i < c_count:
i = i+1
if i == c_count:
print( " ? ); " )
else :
# Python 3 specific
print( "?, ", end = '' )
print( "'''")
print( "stmt = IfxPy.prepare(conn, sql)" )
i = 0
for col in data.columns:
i = i+1
t = data[col].dtype
if t == 'int64':
t = "INTEGER"
else:
t = "LVARCHAR"
print()
print( "c{} = None".format(i) )
print( "IfxPy.bind_param(stmt, {}, c{}, IfxPy.SQL_PARAM_INPUT, IfxPy.{})".format(i, i, t ) )
####### Run the sample function ######
if __name__ == "__main__":
csv_2sql('sample.csv', 'tab1')
|
py
|
1a5b15ca82663ccba873c10a15f9faeddf2f8339
|
#!/usr/bin/env python
# Copyright 2014 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# This script retrieves the history of all V8 branches and trunk revisions and
# their corresponding Chromium revisions.
# Requires a chromium checkout with branch heads:
# gclient sync --with_branch_heads
# gclient fetch
import argparse
import csv
import itertools
import json
import os
import re
import sys
from common_includes import *
CONFIG = {
"BRANCHNAME": "retrieve-v8-releases",
"PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
}
# Expression for retrieving the bleeding edge revision from a commit message.
PUSH_MSG_SVN_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
PUSH_MSG_GIT_RE = re.compile(r".* \(based on ([a-fA-F0-9]+)\)$")
# Expression for retrieving the merged patches from a merge commit message
# (old and new format).
MERGE_MESSAGE_RE = re.compile(r"^.*[M|m]erged (.+)(\)| into).*$", re.M)
CHERRY_PICK_TITLE_GIT_RE = re.compile(r"^.* \(cherry\-pick\)\.?$")
# New git message for cherry-picked CLs. One message per line.
MERGE_MESSAGE_GIT_RE = re.compile(r"^Merged ([a-fA-F0-9]+)\.?$")
# Expression for retrieving reverted patches from a commit message (old and
# new format).
ROLLBACK_MESSAGE_RE = re.compile(r"^.*[R|r]ollback of (.+)(\)| in).*$", re.M)
# New git message for reverted CLs. One message per line.
ROLLBACK_MESSAGE_GIT_RE = re.compile(r"^Rollback of ([a-fA-F0-9]+)\.?$")
# Expression for retrieving the code review link.
REVIEW_LINK_RE = re.compile(r"^Review URL: (.+)$", re.M)
# Expression with three versions (historical) for extracting the v8 revision
# from the chromium DEPS file.
DEPS_RE = re.compile(r"""^\s*(?:["']v8_revision["']: ["']"""
"""|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@"""
"""|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)"""
"""([^"']+)["'].*$""", re.M)
# Expression to pick tag and revision for bleeding edge tags. To be used with
# output of 'svn log'.
BLEEDING_EDGE_TAGS_RE = re.compile(
r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)")
def SortBranches(branches):
"""Sort branches with version number names."""
return sorted(branches, key=SortingKey, reverse=True)
def FilterDuplicatesAndReverse(cr_releases):
"""Returns the chromium releases in reverse order filtered by v8 revision
duplicates.
cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
"""
last = ""
result = []
for release in reversed(cr_releases):
if last == release[1]:
continue
last = release[1]
result.append(release)
return result
def BuildRevisionRanges(cr_releases):
"""Returns a mapping of v8 revision -> chromium ranges.
The ranges are comma-separated, each range has the form R1:R2. The newest
entry is the only one of the form R1, as there is no end range.
cr_releases is a list of [cr_rev, v8_rev] reverse-sorted by cr_rev.
cr_rev either refers to a chromium svn revision or a chromium branch number.
"""
range_lists = {}
cr_releases = FilterDuplicatesAndReverse(cr_releases)
# Visit pairs of cr releases from oldest to newest.
for cr_from, cr_to in itertools.izip(
cr_releases, itertools.islice(cr_releases, 1, None)):
# Assume the chromium revisions are all different.
assert cr_from[0] != cr_to[0]
# TODO(machenbach): Subtraction is not git friendly.
ran = "%s:%d" % (cr_from[0], int(cr_to[0]) - 1)
# Collect the ranges in lists per revision.
range_lists.setdefault(cr_from[1], []).append(ran)
# Add the newest revision.
if cr_releases:
range_lists.setdefault(cr_releases[-1][1], []).append(cr_releases[-1][0])
# Stringify and comma-separate the range lists.
return dict((rev, ", ".join(ran)) for rev, ran in range_lists.iteritems())
def MatchSafe(match):
if match:
return match.group(1)
else:
return ""
class Preparation(Step):
MESSAGE = "Preparation."
def RunStep(self):
self.CommonPrepare()
self.PrepareBranch()
class RetrieveV8Releases(Step):
MESSAGE = "Retrieve all V8 releases."
def ExceedsMax(self, releases):
return (self._options.max_releases > 0
and len(releases) > self._options.max_releases)
def GetBleedingEdgeGitFromPush(self, title):
return MatchSafe(PUSH_MSG_GIT_RE.match(title))
def GetMergedPatches(self, body):
patches = MatchSafe(MERGE_MESSAGE_RE.search(body))
if not patches:
patches = MatchSafe(ROLLBACK_MESSAGE_RE.search(body))
if patches:
# Indicate reverted patches with a "-".
patches = "-%s" % patches
return patches
def GetMergedPatchesGit(self, body):
patches = []
for line in body.splitlines():
patch = MatchSafe(MERGE_MESSAGE_GIT_RE.match(line))
if patch:
patches.append(patch)
patch = MatchSafe(ROLLBACK_MESSAGE_GIT_RE.match(line))
if patch:
patches.append("-%s" % patch)
return ", ".join(patches)
def GetReleaseDict(
self, git_hash, bleeding_edge_rev, bleeding_edge_git, branch, version,
patches, cl_body):
revision = self.GetCommitPositionNumber(git_hash)
return {
# The cr commit position number on the branch.
"revision": revision,
# The git revision on the branch.
"revision_git": git_hash,
# The cr commit position number on master.
"bleeding_edge": bleeding_edge_rev,
# The same for git.
"bleeding_edge_git": bleeding_edge_git,
# The branch name.
"branch": branch,
# The version for displaying in the form 3.26.3 or 3.26.3.12.
"version": version,
# The date of the commit.
"date": self.GitLog(n=1, format="%ci", git_hash=git_hash),
# Merged patches if available in the form 'r1234, r2345'.
"patches_merged": patches,
# Default for easier output formatting.
"chromium_revision": "",
# Default for easier output formatting.
"chromium_branch": "",
# Link to the CL on code review. Trunk pushes are not uploaded, so this
# field will be populated below with the recent roll CL link.
"review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
# Link to the commit message on google code.
"revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
% revision),
}
def GetRelease(self, git_hash, branch):
self.ReadAndPersistVersion()
base_version = [self["major"], self["minor"], self["build"]]
version = ".".join(base_version)
body = self.GitLog(n=1, format="%B", git_hash=git_hash)
patches = ""
if self["patch"] != "0":
version += ".%s" % self["patch"]
if CHERRY_PICK_TITLE_GIT_RE.match(body.splitlines()[0]):
patches = self.GetMergedPatchesGit(body)
else:
patches = self.GetMergedPatches(body)
title = self.GitLog(n=1, format="%s", git_hash=git_hash)
bleeding_edge_git = self.GetBleedingEdgeGitFromPush(title)
bleeding_edge_position = ""
if bleeding_edge_git:
bleeding_edge_position = self.GetCommitPositionNumber(bleeding_edge_git)
# TODO(machenbach): Add the commit position number.
return self.GetReleaseDict(
git_hash, bleeding_edge_position, bleeding_edge_git, branch, version,
patches, body), self["patch"]
def GetReleasesFromMaster(self):
# TODO(machenbach): Implement this in git as soon as we tag again on
# master.
# tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v
# --limit 20")
# releases = []
# for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
# git_hash = self.vc.SvnGit(revision)
# Add bleeding edge release. It does not contain patches or a code
# review link, as tags are not uploaded.
# releases.append(self.GetReleaseDict(
# git_hash, revision, git_hash, self.vc.MasterBranch(), tag, "", ""))
return []
def GetReleasesFromBranch(self, branch):
self.GitReset(self.vc.RemoteBranch(branch))
if branch == self.vc.MasterBranch():
return self.GetReleasesFromMaster()
releases = []
try:
for git_hash in self.GitLog(format="%H").splitlines():
if VERSION_FILE not in self.GitChangedFiles(git_hash):
continue
if self.ExceedsMax(releases):
break # pragma: no cover
if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
break # pragma: no cover
release, patch_level = self.GetRelease(git_hash, branch)
releases.append(release)
# Follow branches only until their creation point.
# TODO(machenbach): This omits patches if the version file wasn't
# manipulated correctly. Find a better way to detect the point where
# the parent of the branch head leads to the trunk branch.
if branch != self.vc.CandidateBranch() and patch_level == "0":
break
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
# Clean up checked-out version file.
self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
return releases
def RunStep(self):
self.GitCreateBranch(self._config["BRANCHNAME"])
branches = self.vc.GetBranches()
releases = []
if self._options.branch == 'recent':
# Get only recent development on trunk, beta and stable.
if self._options.max_releases == 0: # pragma: no cover
self._options.max_releases = 10
beta, stable = SortBranches(branches)[0:2]
releases += self.GetReleasesFromBranch(stable)
releases += self.GetReleasesFromBranch(beta)
releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
elif self._options.branch == 'all': # pragma: no cover
# Retrieve the full release history.
for branch in branches:
releases += self.GetReleasesFromBranch(branch)
releases += self.GetReleasesFromBranch(self.vc.CandidateBranch())
releases += self.GetReleasesFromBranch(self.vc.MasterBranch())
else: # pragma: no cover
# Retrieve history for a specified branch.
assert self._options.branch in (branches +
[self.vc.CandidateBranch(), self.vc.MasterBranch()])
releases += self.GetReleasesFromBranch(self._options.branch)
self["releases"] = sorted(releases,
key=lambda r: SortingKey(r["version"]),
reverse=True)
class SwitchChromium(Step):
MESSAGE = "Switch to Chromium checkout."
def RunStep(self):
cwd = self._options.chromium
# Check for a clean workdir.
if not self.GitIsWorkdirClean(cwd=cwd): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
# Assert that the DEPS file is there.
if not os.path.exists(os.path.join(cwd, "DEPS")): # pragma: no cover
self.Die("DEPS file not present.")
class UpdateChromiumCheckout(Step):
MESSAGE = "Update the checkout and create a new branch."
def RunStep(self):
cwd = self._options.chromium
self.GitCheckout("master", cwd=cwd)
self.GitPull(cwd=cwd)
self.GitCreateBranch(self.Config("BRANCHNAME"), cwd=cwd)
def ConvertToCommitNumber(step, revision):
# Simple check for git hashes.
if revision.isdigit() and len(revision) < 8:
return revision
return step.GetCommitPositionNumber(
revision, cwd=os.path.join(step._options.chromium, "v8"))
class RetrieveChromiumV8Releases(Step):
MESSAGE = "Retrieve V8 releases from Chromium DEPS."
def RunStep(self):
cwd = self._options.chromium
releases = filter(
lambda r: r["branch"] in [self.vc.CandidateBranch(),
self.vc.MasterBranch()],
self["releases"])
if not releases: # pragma: no cover
print "No releases detected. Skipping chromium history."
return True
# Update v8 checkout in chromium.
self.GitFetchOrigin(cwd=os.path.join(cwd, "v8"))
oldest_v8_rev = int(releases[-1]["revision"])
cr_releases = []
try:
for git_hash in self.GitLog(
format="%H", grep="V8", cwd=cwd).splitlines():
if "DEPS" not in self.GitChangedFiles(git_hash, cwd=cwd):
continue
if not self.GitCheckoutFileSafe("DEPS", git_hash, cwd=cwd):
break # pragma: no cover
deps = FileToText(os.path.join(cwd, "DEPS"))
match = DEPS_RE.search(deps)
if match:
cr_rev = self.GetCommitPositionNumber(git_hash, cwd=cwd)
if cr_rev:
v8_rev = ConvertToCommitNumber(self, match.group(1))
cr_releases.append([cr_rev, v8_rev])
# Stop after reaching beyond the last v8 revision we want to update.
# We need a small buffer for possible revert/reland frenzies.
# TODO(machenbach): Subtraction is not git friendly.
if int(v8_rev) < oldest_v8_rev - 100:
break # pragma: no cover
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
# Clean up.
self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
# Add the chromium ranges to the v8 trunk and bleeding_edge releases.
all_ranges = BuildRevisionRanges(cr_releases)
releases_dict = dict((r["revision"], r) for r in releases)
for revision, ranges in all_ranges.iteritems():
releases_dict.get(revision, {})["chromium_revision"] = ranges
# TODO(machenbach): Unify common code with method above.
class RietrieveChromiumBranches(Step):
MESSAGE = "Retrieve Chromium branch information."
def RunStep(self):
cwd = self._options.chromium
trunk_releases = filter(lambda r: r["branch"] == self.vc.CandidateBranch(),
self["releases"])
if not trunk_releases: # pragma: no cover
print "No trunk releases detected. Skipping chromium history."
return True
oldest_v8_rev = int(trunk_releases[-1]["revision"])
# Filter out irrelevant branches.
branches = filter(lambda r: re.match(r"branch-heads/\d+", r),
self.GitRemotes(cwd=cwd))
# Transform into pure branch numbers.
branches = map(lambda r: int(re.match(r"branch-heads/(\d+)", r).group(1)),
branches)
branches = sorted(branches, reverse=True)
cr_branches = []
try:
for branch in branches:
if not self.GitCheckoutFileSafe("DEPS",
"branch-heads/%d" % branch,
cwd=cwd):
break # pragma: no cover
deps = FileToText(os.path.join(cwd, "DEPS"))
match = DEPS_RE.search(deps)
if match:
v8_rev = ConvertToCommitNumber(self, match.group(1))
cr_branches.append([str(branch), v8_rev])
# Stop after reaching beyond the last v8 revision we want to update.
# We need a small buffer for possible revert/reland frenzies.
# TODO(machenbach): Subtraction is not git friendly.
if int(v8_rev) < oldest_v8_rev - 100:
break # pragma: no cover
# Allow Ctrl-C interrupt.
except (KeyboardInterrupt, SystemExit): # pragma: no cover
pass
# Clean up.
self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
# Add the chromium branches to the v8 trunk releases.
all_ranges = BuildRevisionRanges(cr_branches)
trunk_dict = dict((r["revision"], r) for r in trunk_releases)
for revision, ranges in all_ranges.iteritems():
trunk_dict.get(revision, {})["chromium_branch"] = ranges
class CleanUp(Step):
MESSAGE = "Clean up."
def RunStep(self):
self.GitCheckout("master", cwd=self._options.chromium)
self.GitDeleteBranch(self.Config("BRANCHNAME"), cwd=self._options.chromium)
self.CommonCleanup()
class WriteOutput(Step):
MESSAGE = "Print output."
def Run(self):
if self._options.csv:
with open(self._options.csv, "w") as f:
writer = csv.DictWriter(f,
["version", "branch", "revision",
"chromium_revision", "patches_merged"],
restval="",
extrasaction="ignore")
for release in self["releases"]:
writer.writerow(release)
if self._options.json:
with open(self._options.json, "w") as f:
f.write(json.dumps(self["releases"]))
if not self._options.csv and not self._options.json:
print self["releases"] # pragma: no cover
class Releases(ScriptsBase):
def _PrepareOptions(self, parser):
parser.add_argument("-b", "--branch", default="recent",
help=("The branch to analyze. If 'all' is specified, "
"analyze all branches. If 'recent' (default) "
"is specified, track beta, stable and trunk."))
parser.add_argument("-c", "--chromium",
help=("The path to your Chromium src/ "
"directory to automate the V8 roll."))
parser.add_argument("--csv", help="Path to a CSV file for export.")
parser.add_argument("-m", "--max-releases", type=int, default=0,
help="The maximum number of releases to track.")
parser.add_argument("--json", help="Path to a JSON file for export.")
def _ProcessOptions(self, options): # pragma: no cover
return True
def _Config(self):
return {
"BRANCHNAME": "retrieve-v8-releases",
"PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
}
def _Steps(self):
return [
Preparation,
RetrieveV8Releases,
SwitchChromium,
UpdateChromiumCheckout,
RetrieveChromiumV8Releases,
RietrieveChromiumBranches,
CleanUp,
WriteOutput,
]
if __name__ == "__main__": # pragma: no cover
sys.exit(Releases().Run())
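# Illustrative invocation (editorial addition; the script filename is assumed,
# the flags are the ones defined in _PrepareOptions above):
#   python releases.py -c /path/to/chromium/src --csv releases.csv --json releases.json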
|
py
|
1a5b161ab813c186f99175ed82418b7c7119cbc8
|
##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from [email protected]. #
##############################################################################
import unittest
import requests
import json
import time
import helper
import config
# min/max chunk size - these can be set by config, but
# practically the min config value should be larger than
# CHUNK_MIN and the max config value should be less than
# CHUNK_MAX
CHUNK_MIN = 1024 # lower limit (1024b)
CHUNK_MAX = 50*1024*1024 # upper limit (50M)
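# --- Illustrative sketch (editorial addition, not part of the original test
# suite). The auto-chunk tests below recompute the chunk size in bytes from
# the layout returned by the service and check that it falls within
# [CHUNK_MIN, CHUNK_MAX]. A minimal helper expressing that computation
# (hypothetical name) might look like this:
def chunk_nbytes(chunk_dims, item_size):
    """Return the uncompressed size in bytes of one chunk."""
    nbytes = item_size
    for extent in chunk_dims:
        nbytes *= extent
    return nbytes
# e.g. chunk_nbytes([1000, 1000], 4) == 4000000, which lies between
# CHUNK_MIN (1024) and CHUNK_MAX (50*1024*1024).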
class DatasetTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(DatasetTest, self).__init__(*args, **kwargs)
self.base_domain = helper.getTestDomainName(self.__class__.__name__)
helper.setupDomain(self.base_domain, folder=True)
self.endpoint = helper.getEndpoint()
# main
def testScalarDataset(self):
# Test creation/deletion of scalar dataset obj
domain = self.base_domain + "/testScalarDataset.h5"
helper.setupDomain(domain)
print("testScalarDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
req = self.endpoint + '/'
# Get root uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
# create a dataset obj
data = { "type": "H5T_IEEE_F32LE" }
req = self.endpoint + '/datasets'
rsp = requests.post(req, data=json.dumps(data), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["attributeCount"], 0)
dset_id = rspJson["id"]
self.assertTrue(helper.validateId(dset_id))
# read back the obj
req = self.endpoint + '/datasets/' + dset_id
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
for name in ("id", "shape", "hrefs", "layout", "creationProperties",
"attributeCount", "created", "lastModified", "root", "domain"):
self.assertTrue(name in rspJson)
self.assertEqual(rspJson["id"], dset_id)
self.assertEqual(rspJson["root"], root_uuid)
self.assertEqual(rspJson["domain"], domain)
self.assertEqual(rspJson["attributeCount"], 0)
shape_json = rspJson["shape"]
self.assertEqual(shape_json["class"], "H5S_SCALAR")
self.assertEqual(rspJson["type"]["base"], "H5T_IEEE_F32LE")
# Get the type
rsp = requests.get(req + "/type", headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("type" in rspJson)
self.assertEqual(rspJson["type"]["base"], "H5T_IEEE_F32LE")
self.assertTrue("hrefs" in rspJson)
hrefs = rspJson["hrefs"]
self.assertEqual(len(hrefs), 3)
# Get the shape
rsp = requests.get(req + "/shape", headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("created" in rspJson)
self.assertTrue("lastModified" in rspJson)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("shape" in rspJson)
shape_json = rspJson["shape"]
self.assertEqual(shape_json["class"], "H5S_SCALAR")
# try getting verbose info
params = {"verbose": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
for name in ("id", "shape", "hrefs", "layout", "creationProperties",
"attributeCount", "created", "lastModified", "root", "domain"):
self.assertTrue(name in rspJson)
#self.assertTrue("num_chunks" in rspJson)
#self.assertTrue("allocated_size" in rspJson)
# try get with a different user (access depends on the default_public config)
headers = helper.getRequestHeaders(domain=domain, username="test_user2")
rsp = requests.get(req, headers=headers)
if config.get("default_public"):
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["id"], dset_id)
else:
self.assertEqual(rsp.status_code, 403)
# try to do a GET with a different domain (should fail)
another_domain = self.base_domain + "/testScalarDataset2.h5"
helper.setupDomain(another_domain)
print("testScalarDataset2", another_domain)
headers = helper.getRequestHeaders(domain=another_domain)
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 400)
# try DELETE with user who doesn't have create permission on this domain
headers = helper.getRequestHeaders(domain=domain, username="test_user2")
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 403) # forbidden
# try to do a DELETE with a different domain (should fail)
headers = helper.getRequestHeaders(domain=another_domain)
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 400)
# delete the dataset
headers = helper.getRequestHeaders(domain=domain)
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue(rspJson is not None)
# a get for the dataset should now return 410 (GONE)
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 410)
def testScalarEmptyDimsDataset(self):
# Test creation/deletion of scalar dataset obj
domain = self.base_domain + "/testScalarEmptyDimsDataset.h5"
helper.setupDomain(domain)
print("testScalarEmptyDimsDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
req = self.endpoint + '/'
# Get root uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
root_uuid = rspJson["root"]
helper.validateId(root_uuid)
# create a dataset obj
data = { "type": "H5T_IEEE_F32LE", "shape": [] }
req = self.endpoint + '/datasets'
rsp = requests.post(req, data=json.dumps(data), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["attributeCount"], 0)
dset_id = rspJson["id"]
self.assertTrue(helper.validateId(dset_id))
# read back the obj
req = self.endpoint + '/datasets/' + dset_id
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("id" in rspJson)
self.assertEqual(rspJson["id"], dset_id)
self.assertTrue("created" in rspJson)
self.assertTrue("lastModified" in rspJson)
self.assertTrue("hrefs" in rspJson)
self.assertTrue("attributeCount" in rspJson)
self.assertEqual(rspJson["attributeCount"], 0)
self.assertTrue("shape" in rspJson)
shape_json = rspJson["shape"]
self.assertEqual(shape_json["class"], "H5S_SCALAR")
self.assertFalse("dims" in shape_json)
self.assertTrue("type" in rspJson)
self.assertEqual(rspJson["type"]["base"], "H5T_IEEE_F32LE")
def testGet(self):
domain = helper.getTestDomain("tall.h5")
print("testGetDomain", domain)
headers = helper.getRequestHeaders(domain=domain)
# verify domain exists
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
if rsp.status_code != 200:
print("WARNING: Failed to get domain: {}. Is test data setup?".format(domain))
return # abort rest of test
domainJson = json.loads(rsp.text)
root_uuid = domainJson["root"]
# get the dataset uuid
dset_uuid = helper.getUUIDByPath(domain, "/g1/g1.1/dset1.1.1")
self.assertTrue(dset_uuid.startswith("d-"))
# get the dataset json
req = helper.getEndpoint() + '/datasets/' + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
for name in ("id", "shape", "hrefs", "layout", "creationProperties",
"attributeCount", "created", "lastModified", "root", "domain"):
self.assertTrue(name in rspJson)
self.assertEqual(rspJson["id"], dset_uuid)
self.assertEqual(rspJson["root"], root_uuid)
self.assertEqual(rspJson["domain"], domain)
hrefs = rspJson["hrefs"]
self.assertEqual(len(hrefs), 5)
self.assertEqual(rspJson["id"], dset_uuid)
shape = rspJson["shape"]
for name in ("class", "dims", "maxdims"):
self.assertTrue(name in shape)
self.assertEqual(shape["class"], 'H5S_SIMPLE')
self.assertEqual(shape["dims"], [10,10])
self.assertEqual(shape["maxdims"], [10,10])
layout = rspJson["layout"]
self.assertEqual(layout["class"], 'H5D_CHUNKED')
self.assertEqual(layout["dims"], [10,10])
self.assertTrue("partition_count" not in layout)
type = rspJson["type"]
for name in ("base", "class"):
self.assertTrue(name in type)
self.assertEqual(type["class"], 'H5T_INTEGER')
self.assertEqual(type["base"], 'H5T_STD_I32BE')
self.assertEqual(rspJson["attributeCount"], 2)
# these properties should only be available when verbose is used
self.assertFalse("num_chunks" in rspJson)
self.assertFalse("allocated_size" in rspJson)
# attribute should only be here if include_attrs is used
self.assertFalse("attributes" in rspJson)
now = time.time()
# the object shouldn't have been just created or updated
self.assertTrue(rspJson["created"] < now - 10)
self.assertTrue(rspJson["lastModified"] < now - 10)
# request the dataset path
req = helper.getEndpoint() + '/datasets/' + dset_uuid
params = {"getalias": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("alias" in rspJson)
self.assertEqual(rspJson["alias"], ['/g1/g1.1/dset1.1.1'])
# request attributes be included
params = {"include_attrs": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("attributes" in rspJson)
attrs = rspJson["attributes"]
self.assertTrue("attr1" in attrs)
self.assertTrue("attr2" in attrs)
def testGetByPath(self):
domain = helper.getTestDomain("tall.h5")
print("testGetDomain", domain)
headers = helper.getRequestHeaders(domain=domain)
# verify domain exists
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
if rsp.status_code != 200:
print("WARNING: Failed to get domain: {}. Is test data setup?".format(domain))
return # abort rest of test
domainJson = json.loads(rsp.text)
root_uuid = domainJson["root"]
# get the dataset at "/g1/g1.1/dset1.1.1"
h5path = "/g1/g1.1/dset1.1.1"
req = helper.getEndpoint() + "/datasets/"
params = {"h5path": h5path}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
for name in ("id", "shape", "hrefs", "layout", "creationProperties",
"attributeCount", "created", "lastModified", "root", "domain"):
self.assertTrue(name in rspJson)
# get the dataset via a relative path "g1/g1.1/dset1.1.1"
h5path = "g1/g1.1/dset1.1.1"
req = helper.getEndpoint() + "/datasets/"
params = {"h5path": h5path, "grpid": root_uuid}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
for name in ("id", "shape", "hrefs", "layout", "creationProperties",
"attributeCount", "created", "lastModified", "root", "domain"):
self.assertTrue(name in rspJson)
# get the dataset uuid and verify it matches what we got by h5path
dset_uuid = helper.getUUIDByPath(domain, "/g1/g1.1/dset1.1.1")
self.assertTrue(dset_uuid.startswith("d-"))
self.assertEqual(dset_uuid, rspJson["id"])
# try an invalid link and verify a 404 is returned
h5path = "/g1/foobar"
req = helper.getEndpoint() + "/datasets/"
params = {"h5path": h5path}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 404)
# try passing a path to a group and verify we get 404
h5path = "/g1/g1.1"
req = helper.getEndpoint() + "/datasets/"
params = {"h5path": h5path}
rsp = requests.get(req, headers=headers, params=params)
self.assertEqual(rsp.status_code, 404)
def testGetVerbose(self):
domain = helper.getTestDomain("tall.h5")
print("testGetDomain", domain)
headers = helper.getRequestHeaders(domain=domain)
# verify domain exists
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
if rsp.status_code != 200:
print("WARNING: Failed to get domain: {}. Is test data setup?".format(domain))
return # abort rest of test
domainJson = json.loads(rsp.text)
root_uuid = domainJson["root"]
self.assertTrue(helper.validateId(root_uuid))
# get the dataset uuid
dset_uuid = helper.getUUIDByPath(domain, "/g1/g1.1/dset1.1.1")
self.assertTrue(dset_uuid.startswith("d-"))
# get the dataset json
req = helper.getEndpoint() + '/datasets/' + dset_uuid
params = {"verbose": 1}
rsp = requests.get(req, params=params, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
for name in ("id", "shape", "hrefs", "layout", "creationProperties",
"attributeCount", "created", "lastModified", "root", "domain"):
self.assertTrue(name in rspJson)
# these properties should only be available when verbose is used
self.assertTrue("num_chunks" in rspJson)
self.assertTrue("allocated_size" in rspJson)
#self.assertEqual(rspJson["num_chunks"], 1)
#self.assertEqual(rspJson["allocated_size"], 1026) # this will likely change once compression is working
def testDelete(self):
# test Delete
domain = self.base_domain + "/testDelete.h5"
helper.setupDomain(domain)
print("testDelete", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
# create a new dataset
data = { "type": "H5T_IEEE_F32LE" }
req = self.endpoint + '/datasets'
rsp = requests.post(req, data=json.dumps(data), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["attributeCount"], 0)
dset_id = rspJson["id"]
self.assertTrue(helper.validateId(dset_id))
# verify we can do a get on the new dataset
req = helper.getEndpoint() + '/datasets/' + dset_id
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("id" in rspJson)
self.assertEqual(rspJson["id"], dset_id)
# try DELETE with user who doesn't have create permission on this domain
headers = helper.getRequestHeaders(domain=domain, username="test_user2")
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 403) # forbidden
# try to do a DELETE with a different domain (should fail)
another_domain = helper.getParentDomain(domain)
headers = helper.getRequestHeaders(domain=another_domain)
req = helper.getEndpoint() + '/datasets/' + dset_id
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 400)
# delete the new dataset
headers = helper.getRequestHeaders(domain)
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue(rspJson is not None)
# a get for the dataset should now return 410 (GONE)
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 410)
def testCompound(self):
# test Dataset with compound type
domain = self.base_domain + "/testCompound.h5"
helper.setupDomain(domain)
print("testCompound", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
fields = ({'name': 'temp', 'type': 'H5T_STD_I32LE'},
{'name': 'pressure', 'type': 'H5T_IEEE_F32LE'})
datatype = {'class': 'H5T_COMPOUND', 'fields': fields }
payload = {'type': datatype, 'shape': 10}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link the new dataset
name = "dset"
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
def testCompoundDuplicateMember(self):
# test Dataset with compound type but field that is repeated
domain = self.base_domain + "/testCompoundDuplicateMember.h5"
helper.setupDomain(domain)
print("testCompoundDupicateMember", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
self.assertTrue(helper.validateId(root_uuid))
fields = ({'name': 'x', 'type': 'H5T_STD_I32LE'},
{'name': 'x', 'type': 'H5T_IEEE_F32LE'})
datatype = {'class': 'H5T_COMPOUND', 'fields': fields }
payload = {'type': datatype, 'shape': 10}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 400) # Bad Request
def testPostNullSpace(self):
# test Dataset with null dataspace type
domain = self.base_domain + "/testPostNullSpace.h5"
helper.setupDomain(domain)
print("testNullSpace", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# pass H5S_NULL for shape
payload = {'type': 'H5T_IEEE_F32LE', 'shape': 'H5S_NULL'}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset1'
name = 'dset1'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify the dataset has a null dataspace
req = self.endpoint + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_NULL')
# verify type
type_json = rspJson["type"]
self.assertEqual(type_json["class"], 'H5T_FLOAT')
self.assertEqual(type_json['base'], 'H5T_IEEE_F32LE')
def testResizableDataset(self):
# test Dataset with resizable dimension dataspace type
domain = self.base_domain + "/testResizableDataset.h5"
helper.setupDomain(domain)
print("testResizableDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
payload = {'type': 'H5T_IEEE_F32LE', 'shape': 10, 'maxdims': 20}
payload['creationProperties'] = {'fillValue': 3.12 }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'resizable'
name = 'resizable'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify type and shape
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
type_json = rspJson['type']
self.assertEqual(type_json['class'], 'H5T_FLOAT')
self.assertEqual(type_json['base'], 'H5T_IEEE_F32LE')
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 1)
self.assertEqual(shape['dims'][0], 10)
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 20)
creationProps = rspJson["creationProperties"]
self.assertEqual(creationProps["fillValue"], 3.12)
# verify shape using the GET shape request
req = req + "/shape"
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("type" not in rspJson)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 1)
self.assertEqual(shape['dims'][0], 10)
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 20)
# resize the dataset to 15 elements
payload = {"shape": 15}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
# verify updated-shape using the GET shape request
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 1)
self.assertEqual(shape['dims'][0], 15) # increased to 15
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 20)
# resize the dataset to 25 elements (should fail)
payload = {"shape": 25}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 409)
def testResizableUnlimitedDataset(self):
# test Dataset with unlimited dimension
domain = self.base_domain + "/testResizableUnlimitedDataset.h5"
helper.setupDomain(domain)
print("testResizableUnlimitedDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
payload = {'type': 'H5T_IEEE_F32LE', 'shape': [10, 20], 'maxdims': [30, 0]}
payload['creationProperties'] = {'fillValue': 3.12 }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'resizable'
name = 'resizable'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify type and shape
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
type_json = rspJson['type']
self.assertEqual(type_json['class'], 'H5T_FLOAT')
self.assertEqual(type_json['base'], 'H5T_IEEE_F32LE')
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 2)
self.assertEqual(shape['dims'][0], 10)
self.assertEqual(shape['dims'][1], 20)
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 30)
self.assertEqual(shape['maxdims'][1], 0)
# verify shape using the GET shape request
req = req + "/shape"
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("type" not in rspJson)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 2)
self.assertEqual(shape['dims'][0], 10)
self.assertEqual(shape['dims'][1], 20)
self.assertTrue('maxdims' in shape)
self.assertEqual(len(shape['maxdims']), 2)
self.assertEqual(shape['maxdims'][0], 30)
self.assertEqual(shape['maxdims'][1], 0)
# resize the second dimension to 500 elements
payload = {"shape": [10, 500]}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
# verify updated-shape using the GET shape request
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 2)
self.assertEqual(shape['dims'][0], 10)
self.assertEqual(shape['dims'][1], 500)
self.assertTrue('maxdims' in shape)
self.assertEqual(len(shape['maxdims']), 2)
self.assertEqual(shape['maxdims'][0], 30)
self.assertEqual(shape['maxdims'][1], 0)
def testExtendDataset(self):
# test extending dataset
domain = self.base_domain + "/testExtendDataset.h5"
helper.setupDomain(domain)
print("testExtendDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
payload = {'type': 'H5T_STD_I32LE', 'shape': 10, 'maxdims': 20}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'extendable'
name = 'extendable'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify type and shape
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
type_json = rspJson['type']
self.assertEqual(type_json['class'], 'H5T_INTEGER')
self.assertEqual(type_json['base'], 'H5T_STD_I32LE')
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 1)
self.assertEqual(shape['dims'][0], 10)
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 20)
# verify shape using the GET shape request
req = req + "/shape"
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("type" not in rspJson)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 1)
self.assertEqual(shape['dims'][0], 10)
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 20)
# extend the dataset by 5 elements
payload = {"extend": 5}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertTrue("selection" in rspJson)
self.assertEqual(rspJson["selection"], "[10:15]")
# verify updated-shape using the GET shape request
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 1)
self.assertEqual(shape['dims'][0], 15) # increased to 15
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 20)
# try extending by 10 elements (should fail)
payload = {"extend": 10}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 409)
def testExtend2DDataset(self):
# test extending dataset with two dimension
domain = self.base_domain + "/testExtend2DDataset.h5"
helper.setupDomain(domain)
print("testExtend2DDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
payload = {'type': 'H5T_STD_I32LE', 'shape': [10,20], 'maxdims':[0,0]}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'extendable'
name = 'extendable'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify type and shape
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
type_json = rspJson['type']
self.assertEqual(type_json['class'], 'H5T_INTEGER')
self.assertEqual(type_json['base'], 'H5T_STD_I32LE')
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 2)
self.assertEqual(shape['dims'][0], 10)
self.assertEqual(shape['dims'][1], 20)
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 0)
# verify shape using the GET shape request
req = req + "/shape"
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("type" not in rspJson)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 2)
self.assertEqual(shape['dims'][0], 10)
self.assertTrue('maxdims' in shape)
self.assertEqual(shape['maxdims'][0], 0)
# extend the dataset by 5 elements in first dimension
payload = {"extend": 5, "extend_dim": 0}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertTrue("selection" in rspJson)
self.assertEqual(rspJson["selection"], "[10:15,:]")
# verify updated-shape using the GET shape request
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 2)
self.assertEqual(shape['dims'][0], 15) # increased to 15
self.assertEqual(shape['dims'][1], 20) # still 20
# extend the dataset by 10 elements in second dimension
payload = {"extend": 10, "extend_dim": 1}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertTrue("selection" in rspJson)
self.assertEqual(rspJson["selection"], "[:,20:30]")
# verify updated-shape using the GET shape request
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("shape" in rspJson)
shape = rspJson['shape']
self.assertEqual(shape['class'], 'H5S_SIMPLE')
self.assertEqual(len(shape['dims']), 2)
self.assertEqual(shape['dims'][0], 15) # increased to 15
self.assertEqual(shape['dims'][1], 30) # increased to 30
def testCreationPropertiesLayoutDataset(self):
# test Dataset with creation property list
domain = self.base_domain + "/testCreationPropertiesLayoutDataset.h5"
helper.setupDomain(domain)
print("testCreationPropertiesLayoutDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# Create ~1GB dataset
payload = {'type': 'H5T_IEEE_F32LE', 'shape': [365, 780, 1024], 'maxdims': [0, 780, 1024]}
# define a chunk layout with 4 chunks per 'slice'
# chunk size is 798720 bytes
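# (Worked numbers, editorial addition: the requested chunk of 1 x 390 x 512
# float32 elements is 1*390*512*4 = 798,720 bytes; the full dataset is
# 365*780*1024*4 bytes, about 1.17 GB. As the assertion further below
# expects, the service widens the layout to [1, 390, 1024], presumably
# because the requested chunk is smaller than the service's configured
# minimum chunk size.)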
gzip_filter = {'class': 'H5Z_FILTER_DEFLATE', 'id': 1, 'level': 9, 'name': 'deflate'}
payload['creationProperties'] = {'layout': {'class': 'H5D_CHUNKED', 'dims': [1, 390, 512] }, 'filters': [gzip_filter,] }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'chunktest'
name = 'chunktest'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED')
self.assertTrue("dims" in layout_json)
self.assertEqual(layout_json["dims"], [1, 390, 1024])
if config.get("max_chunks_per_folder") > 0:
self.assertTrue("partition_count" in layout_json)
self.assertEqual(layout_json["partition_count"], 10)
# verify compression
self.assertTrue("creationProperties" in rspJson)
cpl = rspJson["creationProperties"]
self.assertTrue("filters") in cpl
filters = cpl["filters"]
self.assertEqual(len(filters), 1)
filter = filters[0]
self.assertTrue("class") in filter
self.assertEqual(filter["class"], 'H5Z_FILTER_DEFLATE')
self.assertTrue("level" in filter)
self.assertEqual(filter["level"], 9)
self.assertTrue("id" in filter)
self.assertEqual(filter["id"], 1)
def testCompressionFiltersDataset(self):
# test Dataset with creation property list
domain = self.base_domain + "/testCompressionFiltersDataset.h5"
helper.setupDomain(domain)
print("testCompressionFiltersDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
compressors = rspJson["compressors"]
self.assertTrue(len(compressors) >= 6)
for compressor in compressors:
# create the dataset
req = self.endpoint + "/datasets"
payload = {'type': 'H5T_IEEE_F32LE', 'shape': [40, 80]}
payload['creationProperties'] = { 'filters': [compressor,] }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset
req = self.endpoint + "/groups/" + root_uuid + "/links/" + compressor
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED')
# verify compression
self.assertTrue("creationProperties" in rspJson)
cpl = rspJson["creationProperties"]
self.assertTrue("filters") in cpl
filters = cpl["filters"]
self.assertEqual(len(filters), 1)
filter = filters[0]
self.assertTrue(isinstance(filter, dict))
self.assertTrue('class' in filter)
self.assertTrue('id' in filter)
self.assertTrue('name' in filter)
self.assertEqual(filter['name'], compressor)
def testCompressionFilterOptionDataset(self):
# test Dataset with creation property list
domain = self.base_domain + "/testCompressionFilterOptionDataset.h5"
helper.setupDomain(domain)
print("testCompressionFilterOptionDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
compressor = {'class': 'H5Z_FILTER_USER', 'name': 'lz4', 'level': 5}
payload = {'type': 'H5T_IEEE_F32LE', 'shape': [40, 80]}
payload['creationProperties'] = { 'filters': [compressor,] }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset
req = self.endpoint + "/groups/" + root_uuid + "/links/dset"
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED')
# verify compression
self.assertTrue("creationProperties" in rspJson)
cpl = rspJson["creationProperties"]
self.assertTrue("filters") in cpl
filters = cpl["filters"]
self.assertEqual(len(filters), 1)
filter = filters[0]
self.assertTrue(isinstance(filter, dict))
self.assertTrue('class' in filter)
self.assertEqual(filter['class'], 'H5Z_FILTER_USER')
self.assertTrue('id' in filter)
self.assertTrue('name' in filter)
self.assertEqual(filter['name'], 'lz4')
def testInvalidFillValue(self):
# test Dataset with simple type and fill value that is incompatible with the type
domain = self.base_domain + "/testInvalidFillValue.h5"
helper.setupDomain(domain)
print("testInvalidFillValue", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
fill_value = 'XXXX' # can't convert to int!
# create the dataset
req = self.endpoint + "/datasets"
payload = {'type': 'H5T_STD_I32LE', 'shape': 10}
payload['creationProperties'] = {'fillValue': fill_value }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 400) # invalid param
def testAutoChunk1dDataset(self):
# test Dataset where chunk layout is set automatically
domain = self.base_domain + "/testAutoChunk1dDataset.h5"
helper.setupDomain(domain)
print("testAutoChunk1dDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# one-billion element 1-D dataset
extent = 1000 * 1000 * 1000
dims = [extent,]
fields = ( {'name': 'x', 'type': 'H5T_IEEE_F64LE'},
{'name': 'y', 'type': 'H5T_IEEE_F64LE'},
{'name': 'z', 'type': 'H5T_IEEE_F64LE'})
datatype = {'class': 'H5T_COMPOUND', 'fields': fields }
payload = {'type': datatype, 'shape': dims }
# the following should get ignored as too small
payload['creationProperties'] = {'layout': {'class': 'H5D_CHUNKED', 'dims': [10,] }}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED')
self.assertTrue("dims" in layout_json)
self.assertTrue("partition_count" not in layout_json)
layout = layout_json["dims"]
self.assertEqual(len(layout), 1)
self.assertTrue(layout[0] < dims[0])
chunk_size = layout[0] * 8 * 3 # three 64-bit (8-byte) fields per element
# chunk size should be between chunk min and max
self.assertTrue(chunk_size >= CHUNK_MIN)
self.assertTrue(chunk_size <= CHUNK_MAX)
def testAutoChunk2dDataset(self):
# test Dataset where chunk layout is set automatically
domain = self.base_domain + "/testAutoChunk2dDataset.h5"
helper.setupDomain(domain)
print("testAutoChunk2dDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# 50K x 80K dataset
dims = [50000, 80000]
payload = {'type': 'H5T_IEEE_F32LE', 'shape': dims }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED')
self.assertTrue("dims" in layout_json)
layout = layout_json["dims"]
self.assertEqual(len(layout), 2)
self.assertTrue(layout[0] < dims[0])
self.assertTrue(layout[1] < dims[1])
chunk_size = layout[0] * layout[1] * 4
# chunk size should be between chunk min and max
self.assertTrue(chunk_size >= CHUNK_MIN)
self.assertTrue(chunk_size <= CHUNK_MAX)
def testMinChunkSizeDataset(self):
# test Dataset where chunk layout is adjusted if provided
# layout is too small
domain = self.base_domain + "/testMinChunkSizeDataset.h5"
helper.setupDomain(domain)
print("testMinChunkSizeDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# 50K x 80K dataset
dims = [50000, 80000]
payload = {'type': 'H5T_IEEE_F32LE', 'shape': dims }
# define a chunk layout with lots of small chunks
payload['creationProperties'] = {'layout': {'class': 'H5D_CHUNKED', 'dims': [10, 10] }}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED')
self.assertTrue("dims" in layout_json)
layout = layout_json["dims"]
self.assertEqual(len(layout), 2)
self.assertTrue(layout[0] < dims[0])
self.assertTrue(layout[1] < dims[1])
chunk_size = layout[0] * layout[1] * 4
# chunk size should be between chunk min and max
self.assertTrue(chunk_size >= CHUNK_MIN)
self.assertTrue(chunk_size <= CHUNK_MAX)
def testPostWithLink(self):
domain = self.base_domain + "/testPostWithLink.h5"
helper.setupDomain(domain)
print("testPostWithLink", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# get root group and verify link count is 0
req = helper.getEndpoint() + '/groups/' + root_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["linkCount"], 0)
type_vstr = {"charSet": "H5T_CSET_ASCII",
"class": "H5T_STRING",
"strPad": "H5T_STR_NULLTERM",
"length": "H5T_VARIABLE" }
payload = {'type': type_vstr, 'shape': 10,
'link': {'id': root_uuid, 'name': 'linked_dset'} }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# get root group and verify link count is 1
req = helper.getEndpoint() + '/groups/' + root_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["linkCount"], 1)
# read the link back and verify
req = helper.getEndpoint() + "/groups/" + root_uuid + "/links/linked_dset"
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200) # link should now exist
rspJson = json.loads(rsp.text)
self.assertTrue("link" in rspJson)
link_json = rspJson["link"]
self.assertEqual(link_json["collection"], "datasets")
self.assertEqual(link_json["class"], "H5L_TYPE_HARD")
self.assertEqual(link_json["title"], "linked_dset")
self.assertEqual(link_json["id"], dset_uuid)
def testPostCommittedType(self):
domain = self.base_domain + "/testPostCommittedType.h5"
helper.setupDomain(domain)
print("testPostCommittedType", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the datatype
payload = {'type': 'H5T_IEEE_F32LE'}
req = self.endpoint + "/datatypes"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create datatype
rspJson = json.loads(rsp.text)
dtype_uuid = rspJson['id']
self.assertTrue(helper.validateId(dtype_uuid))
# link new datatype as 'dtype1'
name = 'dtype1'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {'id': dtype_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# create the dataset
payload = {'type': dtype_uuid, 'shape': [10, 10]}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset1'
name = 'dset1'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# Fetch the dataset type and verify dtype_uuid
req = helper.getEndpoint() + "/datasets/" + dset_uuid + "/type"
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("type" in rspJson)
rsp_type = rspJson["type"]
self.assertTrue("base" in rsp_type)
self.assertEqual(rsp_type["base"], 'H5T_IEEE_F32LE')
self.assertTrue("class" in rsp_type)
self.assertEqual(rsp_type["class"], 'H5T_FLOAT')
self.assertTrue("id" in rsp_type)
self.assertEqual(rsp_type["id"], dtype_uuid)
def testDatasetwithDomainDelete(self):
domain = self.base_domain + "/datasetwithdomaindelete.h6"
print("testDatasetwithDomainDelete:", domain)
helper.setupDomain(domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# get root group and verify link count is 0
req = helper.getEndpoint() + '/groups/' + root_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["linkCount"], 0)
type_vstr = {"charSet": "H5T_CSET_ASCII",
"class": "H5T_STRING",
"strPad": "H5T_STR_NULLTERM",
"length": "H5T_VARIABLE" }
payload = {'type': type_vstr, 'shape': 10,
'link': {'id': root_uuid, 'name': 'linked_dset'} }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
self.assertEqual(root_uuid, rspJson["root"])
# get root group and verify link count is 1
req = helper.getEndpoint() + '/groups/' + root_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertEqual(rspJson["linkCount"], 1)
# delete the domain (with the original user)
req = helper.getEndpoint() + '/'
rsp = requests.delete(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
# try getting the domain again
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 410) # GONE
# re-create a domain
rsp = requests.put(req, headers=headers)
self.assertEqual(rsp.status_code, 201)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
self.assertTrue(root_uuid != rspJson["root"])
root_uuid = rspJson["root"]
# try getting the dataset
req = self.endpoint + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
# TODO - this is returning 200 rather than 400
# to fix: delete domain cache on all SN nodes after domain delete?
# self.assertEqual(rsp.status_code, 400) # Bad Request
# create a dataset again
req = self.endpoint + "/datasets"
payload = {'type': type_vstr, 'shape': 10,
'link': {'id': root_uuid, 'name': 'linked_dset'} }
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
self.assertEqual(root_uuid, rspJson["root"])
def testContiguousRefDataset(self):
# test Dataset where H5D_CONTIGUOUS_REF layout is used
domain = self.base_domain + "/testContiguousRefDataset.h5"
helper.setupDomain(domain)
print("testContiguousRefDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# 50K x 8M dataset
dims = [50000, 8000000]
payload = {'type': 'H5T_IEEE_F32LE', 'shape': dims }
file_uri = "s3://a-storage-bucket/some-file.h5"
offset = 1234
size = dims[0] * dims[1] * 4 # uncompressed size
payload['creationProperties'] = {'layout': {'class': 'H5D_CONTIGUOUS_REF', 'file_uri': file_uri, 'offset': offset, 'size': size }}
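# (Editorial note, hedged: with H5D_CONTIGUOUS_REF the dataset bytes are not
# stored by the service itself; the file_uri/offset/size triple identifies a
# contiguous byte range in an existing HDF5 file, and the chunk-style "dims"
# reported back, as the assertions below suggest, describe how the service
# partitions reads of that range.)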
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CONTIGUOUS_REF')
self.assertEqual(layout_json["file_uri"], file_uri)
self.assertEqual(layout_json["offset"], offset)
self.assertEqual(layout_json["size"], size)
self.assertTrue("dims" in layout_json)
chunk_dims = layout_json["dims"]
self.assertEqual(len(chunk_dims), 2)
chunk_size = chunk_dims[0] * chunk_dims[1] * 4
# chunk size should be between chunk min and max
self.assertTrue(chunk_size >= CHUNK_MIN)
self.assertTrue(chunk_size <= CHUNK_MAX)
def testContiguousRefZeroDimDataset(self):
# test Dataset where H5D_CONTIGUOUS_REF layout is used
domain = self.base_domain + "/testContiguousRefZeroDimDataset.h5"
helper.setupDomain(domain)
print("testContiguousRefZeroDimDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# 0 x 10 dataset
dims = [0, 10]
payload = {'type': 'H5T_STD_I16LE', 'shape': dims }
file_uri = "s3://a-storage-bucket/some-file.h5"
offset = 1234
size = dims[0] * dims[1] * 4 # uncompressed size
payload['creationProperties'] = {'layout': {'class': 'H5D_CONTIGUOUS_REF', 'file_uri': file_uri, 'offset': offset, 'size': size }}
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CONTIGUOUS_REF')
self.assertEqual(layout_json["file_uri"], file_uri)
self.assertEqual(layout_json["offset"], offset)
self.assertEqual(layout_json["size"], size)
self.assertTrue("dims" in layout_json)
chunk_dims = layout_json["dims"]
self.assertEqual(len(chunk_dims), 2)
# layout should be same as the dims
self.assertEqual(chunk_dims[0], dims[0])
self.assertEqual(chunk_dims[1], dims[1])
def testChunkedRefDataset(self):
# test Dataset where H5D_CHUNKED_REF layout is used
domain = self.base_domain + "/testChunkedRefDataset.h5"
helper.setupDomain(domain)
print("testChunkedRefDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# 2Kx3K dataset
dims = [2000, 3000]
# 1000x1000 chunks
chunk_layout = [1000, 1000]
chunk_size = chunk_layout[0] * chunk_layout[1] * 2 # uncompressed size
# make up some chunk locations
chunks = {}
chunks["0_0"] = [1234+1*chunk_size, chunk_size]
chunks["0_1"] = [1234+2*chunk_size, chunk_size]
chunks["0_2"] = [1234+3*chunk_size, chunk_size]
chunks["1_0"] = [1234+4*chunk_size, chunk_size]
chunks["1_1"] = [1234+5*chunk_size, chunk_size]
chunks["1_2"] = [1234+6*chunk_size, chunk_size]
file_uri = "s3://a-storage-bucket/some-file.h5"
layout = {'class': 'H5D_CHUNKED_REF', 'file_uri': file_uri, 'dims': chunk_layout, 'chunks': chunks }
payload = {'type': 'H5T_STD_I16LE', 'shape': dims }
payload['creationProperties'] = {'layout': layout }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED_REF')
self.assertEqual(layout_json["file_uri"], file_uri)
self.assertEqual(layout_json["chunks"], chunks)
self.assertTrue("dims" in layout_json)
chunk_dims = layout_json["dims"]
self.assertEqual(len(chunk_dims), 2)
def testChunkedRefIndirectDataset(self):
# test Dataset where H5D_CHUNKED_REF_INDIRECT layout is used
domain = self.base_domain + "/testChunkedRefIndirectDataset.h5"
helper.setupDomain(domain)
print("testChunkedRefIndirectDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create a dataset to store chunk info
fields = ({'name': 'offset', 'type': 'H5T_STD_I64LE'},
{'name': 'size', 'type': 'H5T_STD_I32LE'})
chunkinfo_type = {'class': 'H5T_COMPOUND', 'fields': fields }
req = self.endpoint + "/datasets"
# Store 20 x 30 = 600 chunk locations
chunkinfo_dims = [20,30]
payload = {'type': chunkinfo_type, 'shape': chunkinfo_dims }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
chunkinfo_uuid = rspJson['id']
self.assertTrue(helper.validateId(chunkinfo_uuid))
# create the primary dataset
# 20Kx30K dataset
dims = [20000, 30000]
# 1000x1000 chunks
chunk_layout = [1000, 1000]
file_uri = "s3://a-storage-bucket/some-file.h5"
layout = {'class': 'H5D_CHUNKED_REF_INDIRECT', 'file_uri': file_uri, 'dims': chunk_layout, 'chunk_table': chunkinfo_uuid }
payload = {'type': 'H5T_STD_I16LE', 'shape': dims }
payload['creationProperties'] = {'layout': layout }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED_REF_INDIRECT')
self.assertEqual(layout_json["file_uri"], file_uri)
self.assertTrue("chunks" not in layout_json)
self.assertTrue("dims" in layout_json)
chunk_dims = layout_json["dims"]
self.assertEqual(len(chunk_dims), 2)
self.assertTrue("chunk_table" in layout)
self.assertEqual(layout["chunk_table"], chunkinfo_uuid)
def testChunkedRefIndirectS3UriDataset(self):
# test Dataset where H5D_CHUNKED_REF_INDIRECT layout is used with
# S3 URIs stored in the chunk table
domain = self.base_domain + "/testChunkedRefIndirectS3UriDataset.h5"
helper.setupDomain(domain)
print("testChunkedRefIndirectS3UriDataset", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create a dataset to store chunk info
max_s3_uri_len = 40
fixed_str_type = {"charSet": "H5T_CSET_ASCII",
"class": "H5T_STRING",
"length": max_s3_uri_len,
"strPad": "H5T_STR_NULLPAD" }
fields = ({'name': 'offset', 'type': 'H5T_STD_I64LE'},
{'name': 'size', 'type': 'H5T_STD_I32LE'},
{'name': 'file_uri', 'type': fixed_str_type})
chunkinfo_type = {'class': 'H5T_COMPOUND', 'fields': fields }
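# Illustrative note: with this compound type each chunk-table element carries its
# own (offset, size, file_uri) triple, so different chunks may be referenced from
# different files. A hypothetical element might look like:
#   (1234, 2000000, "s3://a-storage-bucket/some-other-file.h5")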
req = self.endpoint + "/datasets"
# Store 20 x 30 = 600 chunk locations
chunkinfo_dims = [20,30]
payload = {'type': chunkinfo_type, 'shape': chunkinfo_dims }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
chunkinfo_uuid = rspJson['id']
self.assertTrue(helper.validateId(chunkinfo_uuid))
# create the primary dataset
# 20Kx30K dataset
dims = [20000, 30000]
# 1000x1000 chunks
chunk_layout = [1000, 1000]
file_uri = "s3://a-storage-bucket/some-file.h5"
layout = {'class': 'H5D_CHUNKED_REF_INDIRECT', 'file_uri': file_uri, 'dims': chunk_layout, 'chunk_table': chunkinfo_uuid }
payload = {'type': 'H5T_STD_I16LE', 'shape': dims }
payload['creationProperties'] = {'layout': layout }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED_REF_INDIRECT')
self.assertEqual(layout_json["file_uri"], file_uri)
self.assertTrue("chunks" not in layout_json)
self.assertTrue("dims" in layout_json)
chunk_dims = layout_json["dims"]
self.assertEqual(len(chunk_dims), 2)
self.assertTrue("chunk_table" in layout)
self.assertEqual(layout["chunk_table"], chunkinfo_uuid)
def testDatasetChunkPartitioning(self):
# test Dataset partitioning logic for large datasets
domain = self.base_domain + "/testDatasetChunkPartitioning.h5"
helper.setupDomain(domain)
print("testDatasetChunkPartitioning", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# 50K x 80K x 90K dataset
dims = [50000, 80000, 90000]
payload = {'type': 'H5T_IEEE_F32LE', 'shape': dims }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED')
self.assertTrue("dims" in layout_json)
if config.get("max_chunks_per_folder") > 0:
self.assertTrue("partition_count" in layout_json)
self.assertTrue(layout_json["partition_count"] > 1000) # will change if max_chunks_per_folder is updated
layout = layout_json["dims"]
self.assertEqual(len(layout), 3)
self.assertTrue(layout[0] < dims[0])
self.assertTrue(layout[1] < dims[1])
self.assertTrue(layout[2] < dims[2])
chunk_size = layout[0] * layout[1] * layout[2] * 4
# chunk size should be between chunk min and max
self.assertTrue(chunk_size >= CHUNK_MIN)
self.assertTrue(chunk_size <= CHUNK_MAX)
def testExtendibleDatasetChunkPartitioning(self):
# test Dataset partitioning logic for large datasets
domain = self.base_domain + "/testExtendibleDatasetChunkPartitioning.h5"
helper.setupDomain(domain)
print("testExtendibleDatasetChunkPartitioning", domain)
headers = helper.getRequestHeaders(domain=domain)
# get domain
req = helper.getEndpoint() + '/'
rsp = requests.get(req, headers=headers)
rspJson = json.loads(rsp.text)
self.assertTrue("root" in rspJson)
root_uuid = rspJson["root"]
# create the dataset
req = self.endpoint + "/datasets"
# 0 x 80K x 90K dataset (extendible)
dims = [0, 80000, 90000]
# unlimited extent in dim 0, fixed in dim 1, extendible by 10x in dim 2
max_dims = [0,80000,900000]
payload = {'type': 'H5T_IEEE_F32LE', 'shape': dims, 'maxdims': max_dims }
req = self.endpoint + "/datasets"
rsp = requests.post(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201) # create dataset
rspJson = json.loads(rsp.text)
dset_uuid = rspJson['id']
self.assertTrue(helper.validateId(dset_uuid))
# link new dataset as 'dset'
name = 'dset'
req = self.endpoint + "/groups/" + root_uuid + "/links/" + name
payload = {"id": dset_uuid}
rsp = requests.put(req, data=json.dumps(payload), headers=headers)
self.assertEqual(rsp.status_code, 201)
# verify layout
req = helper.getEndpoint() + "/datasets/" + dset_uuid
rsp = requests.get(req, headers=headers)
self.assertEqual(rsp.status_code, 200)
rspJson = json.loads(rsp.text)
self.assertTrue("layout" in rspJson)
layout_json = rspJson["layout"]
self.assertTrue("class" in layout_json)
self.assertEqual(layout_json["class"], 'H5D_CHUNKED')
self.assertTrue("dims" in layout_json)
if config.get("max_chunks_per_folder") > 0:
self.assertTrue("partition_count" in layout_json)
layout = layout_json["dims"]
self.assertEqual(len(layout), 3)
chunk_size = layout[0] * layout[1] * layout[2] * 4
# chunk size should be between chunk min and max
self.assertTrue(chunk_size >= CHUNK_MIN)
self.assertTrue(chunk_size <= CHUNK_MAX)
if __name__ == '__main__':
#setup test files
unittest.main()
|
py
|
1a5b167e84d495d5178eeb0b56bbab33c06eac80
|
# Copyright 2021 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Autoregressive State Space Model Tests."""
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import tensorshape_util
from tensorflow_probability.python.internal import test_util
from tensorflow_probability.python.sts import AutoregressiveMovingAverageStateSpaceModel
from tensorflow_probability.python.sts import AutoregressiveStateSpaceModel
tfd = tfp.distributions
def arma_explicit_logp(y, ar_coefs, ma_coefs, level_scale):
"""Manual log-prob computation for arma(p, q) process."""
# Source: page 132 of
# http://www.ru.ac.bd/stat/wp-content/uploads/sites/25/2019/03/504_02_Hamilton_Time-Series-Analysis.pdf
p = len(ar_coefs)
q = len(ma_coefs)
t = len(y)
# For the first few steps of y, where previous values
# are not available, we model them as zero-mean with
# stddev `prior_scale`.
e = np.zeros([t])
for i in range(p):
zero_padded_y = np.zeros([p])
zero_padded_y[p - i:p] = y[:i]
pred_y = np.dot(zero_padded_y, ar_coefs[::-1])
e[i] = y[i] - pred_y
for i in range(p, len(y)):
pred_y = (np.dot(y[i - p:i], ar_coefs[::-1]) +
np.dot(e[i - q:i], ma_coefs[::-1]))
e[i] = y[i] - pred_y
lp = (-((t - p) / 2) * np.log(2 * np.pi)
- ((t - p) / 2) * np.log(level_scale ** 2)
- np.sum(e ** 2 / (2 * level_scale ** 2)))
return lp
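# Illustrative sketch (not part of the original test, follows the conventions of
# arma_explicit_logp above): for a pure AR(1) process the prediction errors reduce to
# e[0] = y[0] and e[i] = y[i] - phi * y[i - 1], so the same log-prob can be written
# directly. Kept here only as a readable special case of the function above.
def _ar1_explicit_logp_sketch(y, phi, level_scale):
    """Hypothetical AR(1)-only log-prob; should match arma_explicit_logp(y, [phi], [], level_scale)."""
    y = np.asarray(y)
    t = len(y)
    e = np.concatenate([[y[0]], y[1:] - phi * y[:-1]])  # one-step prediction errors
    return (-((t - 1) / 2) * np.log(2 * np.pi)
            - ((t - 1) / 2) * np.log(level_scale ** 2)
            - np.sum(e ** 2 / (2 * level_scale ** 2)))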
class _AutoregressiveMovingAverageStateSpaceModelTest(test_util.TestCase):
def testEqualsAutoregressive(self):
# An ARMA(p, 0) process is just an AR(p) process
num_timesteps = 10
observed_time_series = self._build_placeholder(
np.random.randn(num_timesteps, 1))
level_scale = self._build_placeholder(0.1)
# We'll test an AR1 process, and also (just for kicks) that the trivial
# embedding as an AR2 process gives the same model.
coefficients_order1 = np.array([1.]).astype(self.dtype)
coefficients_order2 = np.array([1., 1.]).astype(self.dtype)
ar1_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order1,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale]))
ar2_ssm = AutoregressiveStateSpaceModel(
num_timesteps=num_timesteps,
coefficients=coefficients_order2,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
arma1_ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=num_timesteps,
ar_coefficients=coefficients_order1,
ma_coefficients=np.array([0.]).astype(self.dtype),
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
arma2_ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=num_timesteps,
ar_coefficients=coefficients_order2,
ma_coefficients=np.array([0.]).astype(self.dtype),
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 1.]))
ar1_lp, arma1_lp, ar2_lp, arma2_lp = (
ar1_ssm.log_prob(observed_time_series),
arma1_ssm.log_prob(observed_time_series),
ar2_ssm.log_prob(observed_time_series),
arma2_ssm.log_prob(observed_time_series)
)
self.assertAllClose(ar1_lp, arma1_lp)
self.assertAllClose(ar2_lp, arma2_lp)
def testLogprobCorrectness(self):
# Compare the state-space model's log-prob to an explicit implementation.
num_timesteps = 10
observed_time_series_ = np.random.randn(num_timesteps)
ar_coefficients_ = np.array([.7, -.1]).astype(self.dtype)
ma_coefficients_ = np.array([0.5, -0.4]).astype(self.dtype)
level_scale_ = 1.0
observed_time_series = self._build_placeholder(observed_time_series_)
level_scale = self._build_placeholder(level_scale_)
expected_logp = arma_explicit_logp(
observed_time_series_, ar_coefficients_, ma_coefficients_, level_scale_)
ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=num_timesteps,
ar_coefficients=ar_coefficients_,
ma_coefficients=ma_coefficients_,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=[level_scale, 0., 0.]))
lp = ssm.log_prob(observed_time_series[..., tf.newaxis])
self.assertAllClose(lp, expected_logp, rtol=5e-2)
def testBatchShape(self):
# Check that the model builds with batches of parameters.
order = 3
batch_shape = [4, 2]
# No `_build_placeholder`, because coefficients must have static shape.
coefficients = np.random.randn(*(batch_shape + [order])).astype(self.dtype)
order = max(order, order + 1)  # ARMA latent size is max(p, q + 1); here p == q == order, so order + 1 (length of initial_state_prior scale_diag)
level_scale = self._build_placeholder(
np.exp(np.random.randn(*batch_shape)))
ssm = AutoregressiveMovingAverageStateSpaceModel(
num_timesteps=10,
ar_coefficients=coefficients,
ma_coefficients=coefficients,
level_scale=level_scale,
initial_state_prior=tfd.MultivariateNormalDiag(
scale_diag=self._build_placeholder(np.ones([order]))))
if self.use_static_shape:
self.assertAllEqual(tensorshape_util.as_list(ssm.batch_shape),
batch_shape)
else:
self.assertAllEqual(ssm.batch_shape_tensor(), batch_shape)
y = ssm.sample(seed=test_util.test_seed(sampler_type='stateless'))
if self.use_static_shape:
self.assertAllEqual(tensorshape_util.as_list(y.shape)[:-2], batch_shape)
else:
self.assertAllEqual(tf.shape(y)[:-2], batch_shape)
def _build_placeholder(self, ndarray):
"""Convert a numpy array to a TF placeholder.
Args:
ndarray: any object convertible to a numpy array via `np.asarray()`.
Returns:
placeholder: a TensorFlow `placeholder` with default value given by the
provided `ndarray`, dtype given by `self.dtype`, and shape specified
statically only if `self.use_static_shape` is `True`.
"""
ndarray = np.asarray(ndarray).astype(self.dtype)
return tf1.placeholder_with_default(
ndarray, shape=ndarray.shape if self.use_static_shape else None)
@test_util.test_all_tf_execution_regimes
class AutoregressiveMovingAverageStateSpaceModelTestStaticShape32(
_AutoregressiveMovingAverageStateSpaceModelTest):
dtype = np.float32
use_static_shape = True
@test_util.test_all_tf_execution_regimes
class AutoregressiveMovingAverageStateSpaceModelTestDynamicShape32(
_AutoregressiveMovingAverageStateSpaceModelTest):
dtype = np.float32
use_static_shape = False
@test_util.test_all_tf_execution_regimes
class AutoregressiveMovingAverageStateSpaceModelTestStaticShape64(
_AutoregressiveMovingAverageStateSpaceModelTest):
dtype = np.float64
use_static_shape = True
# Don't run tests for the base class.
del _AutoregressiveMovingAverageStateSpaceModelTest
if __name__ == '__main__':
test_util.main()
|
py
|
1a5b17a3634d90ab8110bf70e90c078aa90798ff
|
from ..layout import Channel, Layout, load_speakers, load_real_layout, Speaker, RealLayout
from ..geom import cart, PolarPosition, CartesianPosition
from ...common import PolarScreen, CartesianScreen
from attr import evolve
import pytest
import numpy as np
import numpy.testing as npt
@pytest.fixture
def layout():
# odd nominal positions, for testing
return Layout(name="test", channels=[
Channel(name="M+030", polar_position=(30, 0.0, 2.0),
polar_nominal_position=(25, 0.0, 1.5), az_range=(25, 30), el_range=(0, 0), is_lfe=False),
Channel(name="M-030", polar_position=PolarPosition(-30, 0.0, 2.0),
polar_nominal_position=PolarPosition(-25, 0.0, 1.5), az_range=(-30, -25)),
])
def test_positions(layout):
npt.assert_allclose(layout.positions, [cart(30, 0, 2), cart(-30, 0, 2)])
def test_norm_positions(layout):
npt.assert_allclose(layout.norm_positions, [cart(30, 0, 1), cart(-30, 0, 1)])
def test_nominal_positions(layout):
npt.assert_allclose(layout.nominal_positions, [cart(25, 0, 1.5), cart(-25, 0, 1.5)])
def test_without_lfe(layout):
lfe_channel = Channel(name="LFE", polar_position=(30, -20, 2), is_lfe=True)
layout_lfe = evolve(layout, channels=layout.channels + [lfe_channel])
assert len(layout_lfe.channels) == 3
assert len(layout_lfe.without_lfe.channels) == 2
def test_channel_names(layout):
assert layout.channel_names == ["M+030", "M-030"]
def test_channels_by_name(layout):
assert layout.channels_by_name == {
"M+030": layout.channels[0],
"M-030": layout.channels[1],
}
def test_default_nominal_range():
# defaulted nominal position and ranges should be kept when the position is modified
default_channel = Channel(name="name", polar_position=(10, 20, 1))
modified_channel = evolve(default_channel, polar_position=PolarPosition(30, 40, 1))
for channel in [default_channel, modified_channel]:
assert channel.polar_nominal_position == PolarPosition(10, 20, 1)
assert channel.az_range == (10, 10)
assert channel.el_range == (20, 20)
def test_Channel_check_position():
errors = []
Channel(name="name", polar_position=(10, 20, 1)).check_position(callback=errors.append)
Channel(name="name", polar_position=(180, 20, 1), az_range=(175, -175)).check_position(callback=errors.append)
Channel(name="name", polar_position=(180, 20, 1), az_range=(180, 180)).check_position(callback=errors.append)
assert not errors
errors = []
Channel(name="name", polar_position=(10, 20, 1), az_range=(-5, 5)).check_position(callback=errors.append)
assert errors == ["name: azimuth 10.0 out of range (-5, 5)."]
errors = []
Channel(name="name", polar_position=(10, 20, 1), el_range=(0, 15)).check_position(callback=errors.append)
assert errors == ["name: elevation 20.0 out of range (0, 15)."]
def test_Layout_check_position(layout):
errors = []
layout.check_positions(callback=errors.append)
assert errors == []
layout_err = evolve(layout, channels=[
(evolve(channel, polar_position=PolarPosition(30, 10, 1)) if channel.name == "M+030" else channel)
for channel in layout.channels])
errors = []
layout_err.check_positions(callback=errors.append)
assert errors == ["M+030: elevation 10.0 out of range (0, 0)."]
def test_Layout_with_speakers_real_layout(layout):
speakers = [Speaker(channel=1, names=["M+030"], polar_position=PolarPosition(25, 0, 1.5)),
Speaker(channel=2, names=["M-030"]),
Speaker(channel=3, names=["M-110"])]
screen = PolarScreen(aspectRatio=1.5, centrePosition=PolarPosition(10.0, 20.0, 2.0), widthAzimuth=30.0)
new_layout, upmix = layout.with_speakers(speakers)
npt.assert_allclose(new_layout.positions, [cart(25, 0, 1.5), cart(-30, 0, 2)])
npt.assert_allclose(upmix, [[0, 0], [1, 0], [0, 1], [0, 0]])
new_layout, upmix = layout.with_real_layout(RealLayout(speakers=speakers, screen=screen))
npt.assert_allclose(new_layout.positions, [cart(25, 0, 1.5), cart(-30, 0, 2)])
npt.assert_allclose(upmix, [[0, 0], [1, 0], [0, 1], [0, 0]])
assert new_layout.screen == screen
def test_Layout_check_upmix_matrix(layout):
errors = []
upmix = np.array([[0, 0],
[1, 0],
[0, 0.5],
[0, 0]])
layout.check_upmix_matrix(upmix, callback=errors.append)
assert errors == []
errors = []
upmix = np.array([[0, 0],
[1, 0],
[0, 0],
[0, 0]])
layout.check_upmix_matrix(upmix, callback=errors.append)
assert errors == ["Channel M-030 not mapped to any output."]
errors = []
upmix = np.array([[0, 0],
[1, 0],
[0, 1],
[0, 1]])
layout.check_upmix_matrix(upmix, callback=errors.append)
assert errors == ["Channel M-030 mapped to multiple outputs: [2, 3]."]
errors = []
upmix = np.array([[0, 0],
[1, 1],
[0, 0],
[0, 0]])
layout.check_upmix_matrix(upmix, callback=errors.append)
assert errors == ["Speaker idx 1 used by multiple channels: ['M+030', 'M-030']"]
def test_load_layout_info():
def run_test(yaml_obj, expected, func=load_real_layout):
from ruamel import yaml
from six import StringIO
yaml_str = yaml.dump(yaml_obj)
result = func(StringIO(yaml_str))
assert expected == result
run_test(dict(speakers=[dict(channel=0, names="M+000")]),
RealLayout(speakers=[Speaker(0, ["M+000"])]))
run_test(dict(speakers=[dict(channel=0, names=["M+000"])]),
RealLayout(speakers=[Speaker(0, ["M+000"])]))
run_test(dict(speakers=[dict(channel=0, names=["M+000"], position=dict(az=10, el=5, r=1))]),
RealLayout(speakers=[Speaker(0, ["M+000"], PolarPosition(10, 5, 1))]))
run_test(dict(speakers=[dict(channel=0, names=["M+000"], gain_linear=0.5)]),
RealLayout(speakers=[Speaker(0, ["M+000"], gain_linear=0.5)]))
with pytest.raises(Exception) as excinfo:
run_test(dict(speakers=[dict(channel=0, names=["M+000"], position=dict(az=10, el=5))]),
RealLayout(speakers=[Speaker(0, ["M+000"], PolarPosition(10, 5, 1))]))
assert "Unknown position format" in str(excinfo.value)
# old style with speakers at the top level
run_test([dict(channel=0, names="M+000")],
RealLayout(speakers=[Speaker(0, ["M+000"])]))
# polar screen
run_test(dict(screen=dict(type="polar", aspectRatio=1.5, centrePosition=dict(az=10, el=20, r=2), widthAzimuth=30)),
RealLayout(screen=PolarScreen(aspectRatio=1.5, centrePosition=PolarPosition(10.0, 20.0, 2.0), widthAzimuth=30.0)))
# Cartesian screen
run_test(dict(screen=dict(type="cart", aspectRatio=1.5, centrePosition=dict(X=0.1, Y=0.9, Z=0.2), widthX=0.3)),
RealLayout(screen=CartesianScreen(aspectRatio=1.5, centrePosition=CartesianPosition(0.1, 0.9, 0.2), widthX=0.3)))
# passes through null screens
run_test(dict(screen=None),
RealLayout(screen=None))
# legacy speakers wrapper
run_test(dict(speakers=[dict(channel=0, names="M+000")]),
[Speaker(0, ["M+000"])],
func=load_speakers)
|
py
|
1a5b18d59ea8966a9e88081f772e00ad6ebefa74
|
import collections
import logging
from typing import Dict, List, Optional, Set, Tuple, Union, Callable
from blspy import AugSchemeMPL, G1Element
from chiabip158 import PyBIP158
from clvm.casts import int_from_bytes
from chia.consensus.block_record import BlockRecord
from chia.consensus.block_rewards import (
calculate_base_farmer_reward,
calculate_pool_reward,
)
from chia.consensus.block_root_validation import validate_block_merkle_roots
from chia.full_node.mempool_check_conditions import mempool_check_conditions_dict
from chia.consensus.blockchain_interface import BlockchainInterface
from chia.consensus.coinbase import create_farmer_coin, create_pool_coin
from chia.consensus.constants import ConsensusConstants
from chia.consensus.cost_calculator import NPCResult, calculate_cost_of_program
from chia.consensus.find_fork_point import find_fork_point_in_chain
from chia.full_node.block_store import BlockStore
from chia.full_node.coin_store import CoinStore
from chia.full_node.mempool_check_conditions import get_name_puzzle_conditions
from chia.types.blockchain_format.coin import Coin
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.coin_record import CoinRecord
from chia.types.condition_opcodes import ConditionOpcode
from chia.types.condition_with_args import ConditionWithArgs
from chia.types.full_block import FullBlock
from chia.types.generator_types import BlockGenerator
from chia.types.name_puzzle_condition import NPC
from chia.types.unfinished_block import UnfinishedBlock
from chia.util.condition_tools import (
pkm_pairs_for_conditions_dict,
coin_announcements_names_for_npc,
puzzle_announcements_names_for_npc,
)
from chia.util.errors import Err
from chia.util.generator_tools import (
additions_for_npc,
tx_removals_and_additions,
)
from chia.util.hash import std_hash
from chia.util.ints import uint32, uint64, uint128
log = logging.getLogger(__name__)
async def validate_block_body(
constants: ConsensusConstants,
blocks: BlockchainInterface,
block_store: BlockStore,
coin_store: CoinStore,
peak: Optional[BlockRecord],
block: Union[FullBlock, UnfinishedBlock],
height: uint32,
npc_result: Optional[NPCResult],
fork_point_with_peak: Optional[uint32],
get_block_generator: Callable,
) -> Tuple[Optional[Err], Optional[NPCResult]]:
"""
This assumes the header block has been completely validated.
Validates the transactions and body of the block. Returns None for the first value if everything
validates correctly, or an Err if something does not validate. For the second value, returns a CostResult
only if validation succeeded, and there are transactions. In other cases it returns None. The NPC result is
the result of running the generator with the previous generators refs. It is only present for transaction
blocks which have spent coins.
"""
if isinstance(block, FullBlock):
assert height == block.height
prev_transaction_block_height: uint32 = uint32(0)
# 1. For non-transaction blocks: foliage block, transaction filter, transactions info, and generator must
# be empty. If it is a block but not a transaction block, there is no body to validate. Check that all fields are
# None
if block.foliage.foliage_transaction_block_hash is None:
if (
block.foliage_transaction_block is not None
or block.transactions_info is not None
or block.transactions_generator is not None
):
return Err.NOT_BLOCK_BUT_HAS_DATA, None
prev_tb: BlockRecord = blocks.block_record(block.prev_header_hash)
while not prev_tb.is_transaction_block:
prev_tb = blocks.block_record(prev_tb.prev_hash)
assert prev_tb.timestamp is not None
if (
prev_tb.timestamp > constants.INITIAL_FREEZE_END_TIMESTAMP
and len(block.transactions_generator_ref_list) > 0
):
return Err.NOT_BLOCK_BUT_HAS_DATA, None
return None, None # This means the block is valid
# All checks below this point correspond to transaction blocks
# 2. For blocks, foliage block, transactions info must not be empty
if block.foliage_transaction_block is None or block.transactions_info is None:
return Err.IS_TRANSACTION_BLOCK_BUT_NO_DATA, None
assert block.foliage_transaction_block is not None
# keeps track of the reward coins that need to be incorporated
expected_reward_coins: Set[Coin] = set()
# 3. The transaction info hash in the Foliage block must match the transaction info
if block.foliage_transaction_block.transactions_info_hash != std_hash(block.transactions_info):
return Err.INVALID_TRANSACTIONS_INFO_HASH, None
# 4. The foliage block hash in the foliage block must match the foliage block
if block.foliage.foliage_transaction_block_hash != std_hash(block.foliage_transaction_block):
return Err.INVALID_FOLIAGE_BLOCK_HASH, None
# 5. The reward claims must be valid for the previous blocks, and current block fees
# If height == 0, expected_reward_coins will be left empty
if height > 0:
# Add reward claims for all blocks from the prev prev block, until the prev block (including the latter)
prev_transaction_block = blocks.block_record(block.foliage_transaction_block.prev_transaction_block_hash)
prev_transaction_block_height = prev_transaction_block.height
assert prev_transaction_block.fees is not None
pool_coin = create_pool_coin(
prev_transaction_block_height,
prev_transaction_block.pool_puzzle_hash,
calculate_pool_reward(prev_transaction_block.height),
constants.GENESIS_CHALLENGE,
)
farmer_coin = create_farmer_coin(
prev_transaction_block_height,
prev_transaction_block.farmer_puzzle_hash,
uint64(calculate_base_farmer_reward(prev_transaction_block.height) + prev_transaction_block.fees),
constants.GENESIS_CHALLENGE,
)
# Adds the previous block
expected_reward_coins.add(pool_coin)
expected_reward_coins.add(farmer_coin)
# For the second block in the chain, don't go back further
if prev_transaction_block.height > 0:
curr_b = blocks.block_record(prev_transaction_block.prev_hash)
while not curr_b.is_transaction_block:
expected_reward_coins.add(
create_pool_coin(
curr_b.height,
curr_b.pool_puzzle_hash,
calculate_pool_reward(curr_b.height),
constants.GENESIS_CHALLENGE,
)
)
expected_reward_coins.add(
create_farmer_coin(
curr_b.height,
curr_b.farmer_puzzle_hash,
calculate_base_farmer_reward(curr_b.height),
constants.GENESIS_CHALLENGE,
)
)
curr_b = blocks.block_record(curr_b.prev_hash)
if set(block.transactions_info.reward_claims_incorporated) != expected_reward_coins:
return Err.INVALID_REWARD_COINS, None
if block.foliage_transaction_block.timestamp > constants.INITIAL_FREEZE_END_TIMESTAMP:
if len(block.transactions_info.reward_claims_incorporated) != len(expected_reward_coins):
# No duplicates, after transaction freeze period. Duplicates cause no issues because we filter them out
# anyway.
return Err.INVALID_REWARD_COINS, None
removals: List[bytes32] = []
coinbase_additions: List[Coin] = list(expected_reward_coins)
additions: List[Coin] = []
coin_announcement_names: Set[bytes32] = set()
puzzle_announcement_names: Set[bytes32] = set()
npc_list: List[NPC] = []
removals_puzzle_dic: Dict[bytes32, bytes32] = {}
cost: uint64 = uint64(0)
# We check in header validation that the timestamp is not more than 10 minutes into the future
if (
block.foliage_transaction_block.timestamp <= constants.INITIAL_FREEZE_END_TIMESTAMP
and block.transactions_generator is not None
):
# 6. No transactions before INITIAL_TRANSACTION_FREEZE timestamp
return Err.INITIAL_TRANSACTION_FREEZE, None
else:
# 7a. The generator root must be the hash of the serialized bytes of
# the generator for this block (or zeroes if no generator)
if block.transactions_generator is not None:
if std_hash(bytes(block.transactions_generator)) != block.transactions_info.generator_root:
return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
else:
if block.transactions_info.generator_root != bytes([0] * 32):
return Err.INVALID_TRANSACTIONS_GENERATOR_HASH, None
# 8a. The generator_ref_list must be the hash of the serialized bytes of
# the generator ref list for this block (or 'one' bytes [0x01] if no generator)
# 8b. The generator ref list length must be less than or equal to MAX_GENERATOR_REF_LIST_SIZE entries
# 8c. The generator ref list must not point to a height >= this block's height
if block.transactions_generator_ref_list in (None, []):
if block.transactions_info.generator_refs_root != bytes([1] * 32):
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
else:
# If we have a generator reference list, we must have a generator
if block.transactions_generator is None:
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
# The generator_refs_root must be the hash of the concatenation of the List[uint32]
generator_refs_hash = std_hash(b"".join([bytes(i) for i in block.transactions_generator_ref_list]))
if block.transactions_info.generator_refs_root != generator_refs_hash:
return Err.INVALID_TRANSACTIONS_GENERATOR_REFS_ROOT, None
if len(block.transactions_generator_ref_list) > constants.MAX_GENERATOR_REF_LIST_SIZE:
return Err.TOO_MANY_GENERATOR_REFS, None
if any([index >= height for index in block.transactions_generator_ref_list]):
return Err.FUTURE_GENERATOR_REFS, None
if block.transactions_generator is not None:
# Get List of names removed, puzzles hashes for removed coins and conditions created
assert npc_result is not None
cost = calculate_cost_of_program(block.transactions_generator, npc_result, constants.COST_PER_BYTE)
npc_list = npc_result.npc_list
# 7. Check that cost <= MAX_BLOCK_COST_CLVM
log.debug(
f"Cost: {cost} max: {constants.MAX_BLOCK_COST_CLVM} "
f"percent full: {round(100 * (cost / constants.MAX_BLOCK_COST_CLVM), 2)}%"
)
if cost > constants.MAX_BLOCK_COST_CLVM:
return Err.BLOCK_COST_EXCEEDS_MAX, None
# 8. The CLVM program must not return any errors
if npc_result.error is not None:
return Err(npc_result.error), None
for npc in npc_list:
removals.append(npc.coin_name)
removals_puzzle_dic[npc.coin_name] = npc.puzzle_hash
additions = additions_for_npc(npc_list)
coin_announcement_names = coin_announcements_names_for_npc(npc_list)
puzzle_announcement_names = puzzle_announcements_names_for_npc(npc_list)
else:
assert npc_result is None
# 9. Check that the correct cost is in the transactions info
if block.transactions_info.cost != cost:
return Err.INVALID_BLOCK_COST, None
additions_dic: Dict[bytes32, Coin] = {}
# 10. Check additions for max coin amount
# Be careful to check for 64 bit overflows in other languages. This is the max 64 bit unsigned integer
# We will not even reach here because Coins do type checking (uint64)
for coin in additions + coinbase_additions:
additions_dic[coin.name()] = coin
if coin.amount > constants.MAX_COIN_AMOUNT:
return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None
# 11. Validate addition and removal roots
root_error = validate_block_merkle_roots(
block.foliage_transaction_block.additions_root,
block.foliage_transaction_block.removals_root,
additions + coinbase_additions,
removals,
)
if root_error:
return root_error, None
# 12. The additions and removals must result in the correct filter
byte_array_tx: List[bytes32] = []
for coin in additions + coinbase_additions:
byte_array_tx.append(bytearray(coin.puzzle_hash))
for coin_name in removals:
byte_array_tx.append(bytearray(coin_name))
bip158: PyBIP158 = PyBIP158(byte_array_tx)
encoded_filter = bytes(bip158.GetEncoded())
filter_hash = std_hash(encoded_filter)
if filter_hash != block.foliage_transaction_block.filter_hash:
return Err.INVALID_TRANSACTIONS_FILTER_HASH, None
# 13. Check for duplicate outputs in additions
addition_counter = collections.Counter(_.name() for _ in additions + coinbase_additions)
for k, v in addition_counter.items():
if v > 1:
return Err.DUPLICATE_OUTPUT, None
# 14. Check for duplicate spends inside block
removal_counter = collections.Counter(removals)
for k, v in removal_counter.items():
if v > 1:
return Err.DOUBLE_SPEND, None
# 15. Check if removals exist and were not previously spent. (unspent_db + diff_store + this_block)
# The fork point is the last block in common between the peak chain and the chain of `block`
if peak is None or height == 0:
fork_h: int = -1
elif fork_point_with_peak is not None:
fork_h = fork_point_with_peak
else:
fork_h = find_fork_point_in_chain(blocks, peak, blocks.block_record(block.prev_header_hash))
# Get additions and removals since (after) fork_h but not including this block
# The values include: the coin that was added, the height of the block in which it was confirmed, and the
# timestamp of the block in which it was confirmed
additions_since_fork: Dict[bytes32, Tuple[Coin, uint32, uint64]] = {} # This includes coinbase additions
removals_since_fork: Set[bytes32] = set()
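# Illustrative example (hypothetical heights): if the peak chain is ... -> B5 -> B6 -> B7
# and `block` (at height 7) extends an alternate chain ... -> B5 -> C6, then fork_h is 5
# and the loop below walks C6 (all fork blocks above fork_h, up to block's parent),
# recording every coin added or removed there so this block's spends are checked
# against the fork's own state rather than the peak chain's coin store.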
# For height 0, there are no additions and removals before this block, so we can skip
if height > 0:
# First, get all the blocks in the fork > fork_h, < block.height
prev_block: Optional[FullBlock] = await block_store.get_full_block(block.prev_header_hash)
reorg_blocks: Dict[uint32, FullBlock] = {}
curr: Optional[FullBlock] = prev_block
assert curr is not None
while curr.height > fork_h:
if curr.height == 0:
break
curr = await block_store.get_full_block(curr.prev_header_hash)
assert curr is not None
reorg_blocks[curr.height] = curr
if fork_h != -1:
assert len(reorg_blocks) == height - fork_h - 1
curr = prev_block
assert curr is not None
while curr.height > fork_h:
# Coin store doesn't contain coins from the fork, so we have to run the generator for each block in the fork
if curr.transactions_generator is not None:
# These blocks are in the past and therefore assumed to be valid, so get_block_generator won't raise
curr_block_generator: Optional[BlockGenerator] = await get_block_generator(curr)
assert curr_block_generator is not None and curr.transactions_info is not None
curr_npc_result = get_name_puzzle_conditions(
curr_block_generator,
min(constants.MAX_BLOCK_COST_CLVM, curr.transactions_info.cost),
False,
)
removals_in_curr, additions_in_curr = tx_removals_and_additions(curr_npc_result.npc_list)
else:
removals_in_curr = []
additions_in_curr = []
for c_name in removals_in_curr:
assert c_name not in removals_since_fork
removals_since_fork.add(c_name)
for c in additions_in_curr:
assert c.name() not in additions_since_fork
assert curr.foliage_transaction_block is not None
additions_since_fork[c.name()] = (
c,
curr.height,
curr.foliage_transaction_block.timestamp,
)
for coinbase_coin in curr.get_included_reward_coins():
assert coinbase_coin.name() not in additions_since_fork
assert curr.foliage_transaction_block is not None
additions_since_fork[coinbase_coin.name()] = (
coinbase_coin,
curr.height,
curr.foliage_transaction_block.timestamp,
)
if curr.height == 0:
break
curr = reorg_blocks[curr.height - 1]
assert curr is not None
removal_coin_records: Dict[bytes32, CoinRecord] = {}
for rem in removals:
if rem in additions_dic:
# Ephemeral coin
rem_coin: Coin = additions_dic[rem]
new_unspent: CoinRecord = CoinRecord(
rem_coin,
height,
height,
True,
False,
block.foliage_transaction_block.timestamp,
)
removal_coin_records[new_unspent.name] = new_unspent
else:
unspent = await coin_store.get_coin_record(rem)
if unspent is not None and unspent.confirmed_block_index <= fork_h:
# Spending something in the current chain, confirmed before fork
# (We ignore all coins confirmed after fork)
if unspent.spent == 1 and unspent.spent_block_index <= fork_h:
# Check for coins spent in an ancestor block
return Err.DOUBLE_SPEND, None
removal_coin_records[unspent.name] = unspent
else:
# This coin is not in the current heaviest chain, so it must be in the fork
if rem not in additions_since_fork:
# Check for spending a coin that does not exist in this fork
return Err.UNKNOWN_UNSPENT, None
(
new_coin,
confirmed_height,
confirmed_timestamp,
) = additions_since_fork[rem]
new_coin_record: CoinRecord = CoinRecord(
new_coin,
confirmed_height,
uint32(0),
False,
False,
confirmed_timestamp,
)
removal_coin_records[new_coin_record.name] = new_coin_record
# This check applies to both coins created before fork (pulled from coin_store),
# and coins created after fork (additions_since_fork)
if rem in removals_since_fork:
# This coin was spent in the fork
return Err.DOUBLE_SPEND_IN_FORK, None
removed = 0
for unspent in removal_coin_records.values():
removed += unspent.coin.amount
added = 0
for coin in additions:
added += coin.amount
# 16. Check that the total coin amount for added is <= removed
if removed < added:
return Err.MINTING_COIN, None
fees = removed - added
assert fees >= 0
assert_fee_sum: uint128 = uint128(0)
for npc in npc_list:
if ConditionOpcode.RESERVE_FEE in npc.condition_dict:
fee_list: List[ConditionWithArgs] = npc.condition_dict[ConditionOpcode.RESERVE_FEE]
for cvp in fee_list:
fee = int_from_bytes(cvp.vars[0])
if fee < 0:
return Err.RESERVE_FEE_CONDITION_FAILED, None
assert_fee_sum = uint128(assert_fee_sum + fee)
# 17. Check that the assert fee sum <= fees, and that each reserved fee is non-negative
if fees < assert_fee_sum:
return Err.RESERVE_FEE_CONDITION_FAILED, None
# 18. Check that the fee amount + farmer reward < maximum coin amount
if fees + calculate_base_farmer_reward(height) > constants.MAX_COIN_AMOUNT:
return Err.COIN_AMOUNT_EXCEEDS_MAXIMUM, None
# 19. Check that the computed fees are equal to the fees in the block header
if block.transactions_info.fees != fees:
return Err.INVALID_BLOCK_FEE_AMOUNT, None
# 20. Verify that removed coin puzzle_hashes match with calculated puzzle_hashes
for unspent in removal_coin_records.values():
if unspent.coin.puzzle_hash != removals_puzzle_dic[unspent.name]:
return Err.WRONG_PUZZLE_HASH, None
# 21. Verify conditions
# create hash_key list for aggsig check
pairs_pks: List[G1Element] = []
pairs_msgs: List[bytes] = []
for npc in npc_list:
assert height is not None
unspent = removal_coin_records[npc.coin_name]
error = mempool_check_conditions_dict(
unspent,
coin_announcement_names,
puzzle_announcement_names,
npc.condition_dict,
prev_transaction_block_height,
block.foliage_transaction_block.timestamp,
)
if error:
return error, None
for pk, m in pkm_pairs_for_conditions_dict(
npc.condition_dict, npc.coin_name, constants.AGG_SIG_ME_ADDITIONAL_DATA
):
pairs_pks.append(pk)
pairs_msgs.append(m)
# 22. Verify aggregated signature
# TODO: move this to pre_validate_blocks_multiprocessing so we can sync faster
if not block.transactions_info.aggregated_signature:
return Err.BAD_AGGREGATE_SIGNATURE, None
# noinspection PyTypeChecker
if not AugSchemeMPL.aggregate_verify(pairs_pks, pairs_msgs, block.transactions_info.aggregated_signature):
return Err.BAD_AGGREGATE_SIGNATURE, None
return None, npc_result
|
py
|
1a5b1a6c2e8431bc44bb0af5497e701ad957d35a
|
#!/usr/bin/env python3
import sys
import torch
import logging
import speechbrain as sb
import torchaudio
from hyperpyyaml import load_hyperpyyaml
from speechbrain.tokenizers.SentencePiece import SentencePiece
from speechbrain.utils.data_utils import undo_padding
from speechbrain.utils.distributed import run_on_main
"""Recipe for training a sequence-to-sequence ASR system with CommonVoice.
The system employs an encoder, a decoder, and an attention mechanism
between them. Decoding is performed with beamsearch.
To run this recipe, do the following:
> python train.py hparams/train.yaml
With the default hyperparameters, the system employs a CRDNN encoder.
The decoder is based on a standard GRU and BeamSearch (no LM).
The neural network is trained on both CTC and negative log-likelihood
targets, with sub-word units estimated via Byte Pair Encoding (BPE).
The experiment file is flexible enough to support a large variety of
different systems. By properly changing the parameter files, you can try
different encoders, decoders, tokens (e.g., characters instead of BPE),
training languages (all CommonVoice languages), and many
other possible variations.
Authors
* Titouan Parcollet 2020
"""
logger = logging.getLogger(__name__)
# Define training procedure
class ASR(sb.core.Brain):
def compute_forward(self, batch, stage):
"""Forward computations from the waveform batches to the output probabilities."""
batch = batch.to(self.device)
wavs, wav_lens = batch.sig
tokens_bos, _ = batch.tokens_bos
wavs, wav_lens = wavs.to(self.device), wav_lens.to(self.device)
# Forward pass
feats = self.hparams.compute_features(wavs)
feats = self.modules.normalize(feats, wav_lens)
# Add augmentation if specified
if stage == sb.Stage.TRAIN:
if hasattr(self.hparams, "augmentation"):
feats = self.hparams.augmentation(feats)
x = self.modules.enc(feats.detach())
e_in = self.modules.emb(tokens_bos) # y_in bos + tokens
h, _ = self.modules.dec(e_in, x, wav_lens)
# Output layer for seq2seq log-probabilities
logits = self.modules.seq_lin(h)
p_seq = self.hparams.log_softmax(logits)
# Compute outputs
if stage == sb.Stage.TRAIN:
current_epoch = self.hparams.epoch_counter.current
if current_epoch <= self.hparams.number_of_ctc_epochs:
# Output layer for ctc log-probabilities
logits = self.modules.ctc_lin(x)
p_ctc = self.hparams.log_softmax(logits)
return p_ctc, p_seq, wav_lens
else:
return p_seq, wav_lens
else:
p_tokens, scores = self.hparams.beam_searcher(x, wav_lens)
return p_seq, wav_lens, p_tokens
def compute_objectives(self, predictions, batch, stage):
"""Computes the loss (CTC+NLL) given predictions and targets."""
current_epoch = self.hparams.epoch_counter.current
if stage == sb.Stage.TRAIN:
if current_epoch <= self.hparams.number_of_ctc_epochs:
p_ctc, p_seq, wav_lens = predictions
else:
p_seq, wav_lens = predictions
else:
p_seq, wav_lens, predicted_tokens = predictions
ids = batch.id
tokens_eos, tokens_eos_lens = batch.tokens_eos
tokens, tokens_lens = batch.tokens
loss_seq = self.hparams.seq_cost(
p_seq, tokens_eos, length=tokens_eos_lens
)
# Add ctc loss if necessary
if (
stage == sb.Stage.TRAIN
and current_epoch <= self.hparams.number_of_ctc_epochs
):
loss_ctc = self.hparams.ctc_cost(
p_ctc, tokens, wav_lens, tokens_lens
)
loss = self.hparams.ctc_weight * loss_ctc
loss += (1 - self.hparams.ctc_weight) * loss_seq
else:
loss = loss_seq
if stage != sb.Stage.TRAIN:
# Decode token terms to words
predicted_words = self.tokenizer(
predicted_tokens, task="decode_from_list"
)
# Convert indices to words
target_words = undo_padding(tokens, tokens_lens)
target_words = self.tokenizer(target_words, task="decode_from_list")
self.wer_metric.append(ids, predicted_words, target_words)
self.cer_metric.append(ids, predicted_words, target_words)
return loss
def fit_batch(self, batch):
"""Train the parameters given a single batch in input"""
predictions = self.compute_forward(batch, sb.Stage.TRAIN)
loss = self.compute_objectives(predictions, batch, sb.Stage.TRAIN)
loss.backward()
if self.check_gradients(loss):
self.optimizer.step()
self.optimizer.zero_grad()
return loss.detach()
def evaluate_batch(self, batch, stage):
"""Computations needed for validation/test batches"""
predictions = self.compute_forward(batch, stage=stage)
with torch.no_grad():
loss = self.compute_objectives(predictions, batch, stage=stage)
return loss.detach()
def on_stage_start(self, stage, epoch):
"""Gets called at the beginning of each epoch"""
if stage != sb.Stage.TRAIN:
self.cer_metric = self.hparams.cer_computer()
self.wer_metric = self.hparams.error_rate_computer()
def on_stage_end(self, stage, stage_loss, epoch):
"""Gets called at the end of an epoch."""
# Compute/store important stats
stage_stats = {"loss": stage_loss}
if stage == sb.Stage.TRAIN:
self.train_stats = stage_stats
else:
stage_stats["CER"] = self.cer_metric.summarize("error_rate")
stage_stats["WER"] = self.wer_metric.summarize("error_rate")
# Perform end-of-iteration things, like annealing, logging, etc.
if stage == sb.Stage.VALID:
old_lr, new_lr = self.hparams.lr_annealing(stage_stats["loss"])
sb.nnet.schedulers.update_learning_rate(self.optimizer, new_lr)
self.hparams.train_logger.log_stats(
stats_meta={"epoch": epoch, "lr": old_lr},
train_stats=self.train_stats,
valid_stats=stage_stats,
)
self.checkpointer.save_and_keep_only(
meta={"WER": stage_stats["WER"]}, min_keys=["WER"],
)
elif stage == sb.Stage.TEST:
self.hparams.train_logger.log_stats(
stats_meta={"Epoch loaded": self.hparams.epoch_counter.current},
test_stats=stage_stats,
)
with open(self.hparams.wer_file, "w") as w:
self.wer_metric.write_stats(w)
# Define custom data procedure
def dataio_prepare(hparams):
# 1. Define datasets
data_folder = hparams["data_folder"]
train_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["train_csv"], replacements={"data_root": data_folder},
)
if hparams["sorting"] == "ascending":
# we sort training data to speed up training and get better results.
train_data = train_data.filtered_sorted(
sort_key="duration",
key_max_value={"duration": hparams["avoid_if_longer_than"]},
key_min_value={"duration": hparams["avoid_if_smaller_than"]},
)
# when sorting, do not shuffle in the dataloader; otherwise sorting is pointless
hparams["dataloader_options"]["shuffle"] = False
elif hparams["sorting"] == "descending":
train_data = train_data.filtered_sorted(
sort_key="duration",
reverse=True,
key_max_value={"duration": hparams["avoid_if_longer_than"]},
key_min_value={"duration": hparams["avoid_if_smaller_than"]},
)
# when sorting, do not shuffle in the dataloader; otherwise sorting is pointless
hparams["dataloader_options"]["shuffle"] = False
elif hparams["sorting"] == "random":
pass
else:
raise NotImplementedError(
"sorting must be random, ascending or descending"
)
valid_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["valid_csv"], replacements={"data_root": data_folder},
)
# We also sort the validation data so it is faster to validate
# valid_data = valid_data.filtered_sorted(sort_key="duration")
valid_data = valid_data.filtered_sorted(
sort_key="duration",
reverse=True,
key_max_value={"duration": hparams["avoid_if_longer_than"]},
key_min_value={"duration": hparams["avoid_if_smaller_than"]},
)
test_data = sb.dataio.dataset.DynamicItemDataset.from_csv(
csv_path=hparams["test_csv"], replacements={"data_root": data_folder},
)
# We also sort the test data so it is faster to validate
test_data = test_data.filtered_sorted(
sort_key="duration",
reverse=True,
key_max_value={"duration": hparams["avoid_if_longer_than"]},
key_min_value={"duration": hparams["avoid_if_smaller_than"]},
)
datasets = [train_data, valid_data, test_data]
# defining tokenizer and loading it
tokenizer = SentencePiece(
model_dir=hparams["save_folder"],
vocab_size=hparams["output_neurons"],
annotation_train=hparams["train_csv"],
annotation_read="wrd",
model_type=hparams["token_type"],
character_coverage=hparams["character_coverage"],
bos_id=hparams["bos_index"],
eos_id=hparams["eos_index"]
)
# 2. Define audio pipeline:
@sb.utils.data_pipeline.takes("wav", "start_seg", "end_seg")
@sb.utils.data_pipeline.provides("sig")
def audio_pipeline(wav, start_seg, end_seg):
# info = torchaudio.info(wav)
start = int(float(start_seg) * hparams["sample_rate"])
stop = int(float(end_seg) * hparams["sample_rate"])
speech_segment = {"file" : wav, "start" : start, "stop" : stop}
sig = sb.dataio.dataio.read_audio(speech_segment)
return sig
# resampled = torchaudio.transforms.Resample(
# info.sample_rate, hparams["sample_rate"],
# )(sig)
# return resampled
sb.dataio.dataset.add_dynamic_item(datasets, audio_pipeline)
# 3. Define text pipeline:
@sb.utils.data_pipeline.takes("wrd")
@sb.utils.data_pipeline.provides(
"tokens_list", "tokens_bos", "tokens_eos", "tokens"
)
def text_pipeline(wrd):
tokens_list = tokenizer.sp.encode_as_ids(wrd)
yield tokens_list
tokens_bos = torch.LongTensor([hparams["bos_index"]] + (tokens_list))
yield tokens_bos
tokens_eos = torch.LongTensor(tokens_list + [hparams["eos_index"]])
yield tokens_eos
tokens = torch.LongTensor(tokens_list)
yield tokens
sb.dataio.dataset.add_dynamic_item(datasets, text_pipeline)
# 4. Set output:
sb.dataio.dataset.set_output_keys(
datasets, ["id", "sig", "tokens_bos", "tokens_eos", "tokens"],
)
return train_data, valid_data, test_data, tokenizer
if __name__ == "__main__":
# Load hyperparameters file with command-line overrides
hparams_file, run_opts, overrides = sb.parse_arguments(sys.argv[1:])
with open(hparams_file) as fin:
hparams = load_hyperpyyaml(fin, overrides)
# If distributed_launch=True then
# create ddp_group with the right communication protocol
sb.utils.distributed.ddp_init_group(run_opts)
# Dataset preparation (parsing CommonVoice)
# from common_voice_prepare import prepare_common_voice # noqa
# Create experiment directory
sb.create_experiment_directory(
experiment_directory=hparams["output_folder"],
hyperparams_to_save=hparams_file,
overrides=overrides,
)
# Due to DDP, we do the preparation ONLY on the main python process
# run_on_main(
# prepare_common_voice,
# kwargs={
# "data_folder": hparams["data_folder"],
# "save_folder": hparams["save_folder"],
# "train_tsv_file": hparams["train_tsv_file"],
# "dev_tsv_file": hparams["dev_tsv_file"],
# "test_tsv_file": hparams["test_tsv_file"],
# "accented_letters": hparams["accented_letters"],
# "language": hparams["language"],
# },
# )
# Create the datasets objects as well as tokenization and encoding :-D
train_data, valid_data, test_set, tokenizer = dataio_prepare(hparams)
# Trainer initialization
asr_brain = ASR(
modules=hparams["modules"],
hparams=hparams,
run_opts=run_opts,
opt_class=hparams["opt_class"],
checkpointer=hparams["checkpointer"],
)
# Adding objects to trainer.
asr_brain.tokenizer = tokenizer
# Training
# with torch.autograd.detect_anomaly():
asr_brain.fit(
asr_brain.hparams.epoch_counter,
train_data,
valid_data,
train_loader_kwargs=hparams["dataloader_options"],
valid_loader_kwargs=hparams["test_dataloader_options"],
)
# Test
asr_brain.hparams.wer_file = hparams["output_folder"] + "/wer_test.txt"
asr_brain.evaluate(
test_set,
min_key="WER",
test_loader_kwargs=hparams["test_dataloader_options"],
)
|
py
|
1a5b1ae7e37cdc2617fa34def6673f73516a820f
|
# Test functionality of sellers side
from emarket.client_seller import ClientSeller
from emarket.emarket import Item
import time
from os import environ as env
from dotenv import load_dotenv, find_dotenv
ENV_FILE = find_dotenv()
if ENV_FILE:
load_dotenv(ENV_FILE)
else:
raise FileNotFoundError("Could not locate .env file")
#load the env vars
FRONT_SELLER_A_IP = env.get("FRONT_SELLER_A_IP")
FRONT_SELLER_B_IP = env.get("FRONT_SELLER_B_IP")
cs = ClientSeller([FRONT_SELLER_A_IP, FRONT_SELLER_B_IP])
print("####################### CREATE USER")
start_time = time.time()
csid = cs.create_user("Luke","flamma7", "enterprise")
print("--- %s seconds ---" % (time.time() - start_time))
print("KILL NOT A LEADER")
time.sleep(10)
## TEST LOGIN
print("####################### LOG IN")
start_time = time.time()
cs.login("flamma7", "enterprise")
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### LOG OUT")
start_time = time.time()
cs.logout()
print("--- %s seconds ---" % (time.time() - start_time))
cs.login("flamma7", "enterprise")
## TEST ITEM FOR SALE
print("####################### PUT ITEM FOR SALE")
i1 = Item("ether", 0, 0, ["crypto", "smart", "blockchain"], True, 1300, csid)
i2 = Item("bitcoin", 0, 1, ["crypto", "blockchain", "standard"], True, 33000, csid)
i3 = Item("dogecoin", 0, 2, ["crypto", "meme", "blockchain", "elon"], False, 0.03, csid)
i4 = Item("cardano", 0, 3, ["crypto", "blockchain", "smart", "nextgen"], True, 0.3, csid)
status, i1_id = cs.put_item_for_sale(i1, 500)
status, i2_id = cs.put_item_for_sale(i2, 100)
status, i3_id = cs.put_item_for_sale(i3, 300000)
start_time = time.time()
status, i4_id = cs.put_item_for_sale(i4, 300000)
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### CHANGE SALE PRICE")
start_time = time.time()
cs.change_sale_price_item(i3_id, 0.07)
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### REMOVE ITEM FROM SALE")
start_time = time.time()
cs.remove_item_from_sale(i2_id, 100)
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### DISPLAY ACTIVE ITEMS")
start_time = time.time()
cs.display_active_seller_items()
print("--- %s seconds ---" % (time.time() - start_time))
print("####################### GET RATING")
start_time = time.time()
cs.get_rating()
print("--- %s seconds ---" % (time.time() - start_time))
# Create 2nd Seller
# Create 3rd Seller
|
py
|
1a5b1e77892af46576adcb0c1083e21753244813
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
long_description = """
branching_process
"""
config = dict(
description='hawkes process fitting',
author='Dan MacKinlay',
url='URL to get it at.',
download_url='Where to download it.',
author_email='My email.',
version='0.1',
install_requires=[
'nose',
'scipy',
'numpy',
'seaborn',
'pandas',
],
packages=['branching_process'],
scripts=[],
name='branching_process',
# # see https://python-packaging.readthedocs.io/en/latest/non-code-files.html
# package_data=dict(
# branching_process= ['datasets'],
# ),
# include_package_data=True
)
setup(**config)
|
py
|
1a5b1f4adec2db6408b8d4b2886ca1764683671c
|
"""
@file main.py Main file for running Micropolis with the GTK frontend
@todo Implement run()
"""
import pyMicropolis.micropolisEngine
import gi
gi.require_version('Gtk', '3.0')
gi.require_version('PangoCairo', '1.0')
from pyMicropolis.micropolisEngine import micropolisengine, micropolisgtkengine, micropoliswindow, micropolisrobot
from gi.repository import Gtk as gtk
import random
import math
def run(builderBot=None):
engine = micropolisgtkengine.CreateGTKEngine()
engine.cityTax = 10
engine.setPasses(200)
setTile = engine.setTile
if False:
for i in range(0, 4):
engine.addRobot(
micropolisrobot.MicropolisRobot_PacBot(
x=(8 * 16) + 3 + (16 * 2 * i),
y=(7 * 16) + 3,
direction=0))
if False:
for i in range(0, 20):
engine.addRobot(
micropolisrobot.MicropolisRobot_PacBot(
x=random.randint(0, (micropolisengine.WORLD_W * 16) - 1),
y=random.randint(0, (micropolisengine.WORLD_H * 16) - 1),
direction = random.randint(0, 3) * math.pi / 2))
if False:
for y in range(0, micropolisengine.WORLD_H):
for x in range(0, micropolisengine.WORLD_W):
setTile(x, y, micropolisengine.RUBBLE | micropolisengine.BLBNBIT)
for y in range(10, 15):
for x in range(10, 15):
setTile(x, y, micropolisengine.FIRE | micropolisengine.ANIMBIT)
x = 0
y = 0
w = 800
h = 600
if True:
win1 = micropoliswindow.MicropolisPanedWindow(engine=engine)
win1.set_default_size(w, h)
win1.set_size_request(w, h)
win1.move(x, y)
win1.show_all()
gtk.main()
# for bots. Return the engine for training simulation
def train(bot=None, rank=None, root_gtk=None):
kwargs = {'bot': bot, 'rank': rank, 'root_gtk': root_gtk}
engine = micropolisgtkengine.CreateGTKEngine(**kwargs)
engine.cityTax = 10
engine.setPasses(200)
setTile = engine.setTile
x = 0
y = 0
w = 800
h = 600
if True:
win1 = micropoliswindow.MicropolisPanedWindow(engine=engine)
win1.set_default_size(w, h)
win1.set_size_request(w, h)
win1.move(x, y)
win1.show_all()
return engine, win1
|
py
|
1a5b21cfb8f853f9f4545695d6984e212211771c
|
#!/usr/bin/env python3.6
"""
TODO:
- docs/source links
- how to find the source of a builtin module?
- header
- note that this is run on a Posix machine
- sys.platform ?
- footer
- include propsed additions to pathlib
- P for proposed?
"""
import collections
import functools
import inspect
import itertools
import os as _os
import os.path as _ospath
import re
import shutil as _shutil
import pathlib as _pathlib
import textwrap
import path as _path
#import trio as _trio
meta = {}
meta['os'] = {
'source': 'https://github.com/python/cpython/tree/3.6/Lib/os.py',
'docs': 'https://docs.python.org/3/library/os.html',
'docsbaseurl': 'https://docs.python.org/3/library/os.html#os.',
}
meta['os.path'] = {
'source': [
'https://github.com/python/cpython/blob/3.6/Lib/posixpath.py',
'https://github.com/python/cpython/tree/3.6/Lib/ntpath.py',
'https://github.com/python/cpython/tree/3.6/Lib/macpath.py'],
'docs': 'https://docs.python.org/3/library/os.path.html',
'docsbaseurl': 'https://docs.python.org/3/library/os.path.html#os.path.',
}
meta['shutil'] = {
'source': 'https://github.com/python/cpython/tree/3.6/Lib/shutil.py',
'docs': 'https://docs.python.org/3/library/shutil.html',
'docsbaseurl': 'https://docs.python.org/3/library/shutil.html#shutil.',
}
meta['pathlib'] = {
'source': 'https://github.com/python/cpython/blob/3.6/Lib/pathlib.py',
'docs': 'https://docs.python.org/3/library/pathlib.html',
'docsbaseurl': 'https://docs.python.org/3/library/pathlib.html#pathlib.Path.', # pathlib.PurePath.
}
meta['pathpy'] = {
'source': [
'https://github.com/jaraco/path.py/blob/master/path.py',
],
'src': 'https://github.com/jaraco/path.py',
'docs': 'https://pathpy.readthedocs.io/en/latest/',
'docsbaseurl': 'https://pathpy.readthedocs.io/en/latest/api.html#path.Path.',
}
meta['trio'] = {
'source': 'https://github.com/python-trio/trio/blob/master/trio/_path.py',
'docs': 'https://trio.readthedocs.io/en/latest/reference-io.html#trio.Path',
'src': 'https://github.com/python-trio/trio',
'docsbaseurl': 'https://trio.readthedocs.io/en/latest/reference-io.html#trio.Path.'
}
def maybe_list(obj):
if isinstance(obj, (tuple, list)):
return obj
return (obj,)
def print_header__modules():
print('Modules')
print('+++++++++')
for key, data in meta.items():
print('- %s' % key)
print('')
for url in maybe_list(data.get('src', [])):
print(' - Src: %s' % url)
for url in maybe_list(data['source']):
print(' - Source: %s' % url)
for url in maybe_list(data['docs']):
print(' - Docs: %s' % url)
print('')
mappings = {}
mappings['os'] = {
'unlink': {
'pathpy': 'remove',
'os': 'remove',
},
'lstat': {
'os': 'stat',
'pathlib': 'stat',
}
}
mappings['pathpy'] = {
# 'getcwd': {
# 'os': ['getcwdu', 'getcwdb'],
# },
'__div__': {
'os.path': 'join',
'pathlib': 'joinpath',
'pathpy': ['__rdiv__', 'joinpath'],
},
'__rdiv__': {
'os.path': 'join',
'pathlib': 'joinpath',
'pathpy': ['__div__', 'joinpath'],
},
'cd': {
'os': 'chdir',
},
'getsize': {
'pathpy': 'size'
},
'lines': {
'pathpy': 'text',
},
'name': {
'os.path': 'basename',
'pathpy': 'basename',
},
'parent': {
'os.path': 'dirname',
'pathpy': 'dirname',
},
'read_md5': {
'pathpy': 'read_hash',
},
'readlink': {
'pathpy': 'readlinkabs',
},
'readlinkabs': {
'os': 'readlink',
},
'splitpath': {
'pathpy': ['parent', 'name'],
'os.path': 'split',
},
'stat': {
'pathpy': 'lstat',
},
'size': {
'os.path': 'getsize',
},
'unlink': {
'pathpy': 'remove',
'os': 'remove',
},
'unlink_p': {
'pathpy': 'remove_p',
}
}
mappings['pathlib'] = {
'atime': {
'pathpy': 'getatime',
'os.path': 'getatime',
},
'ctime': {
'pathpy': 'getctime',
'os.path': 'getctime',
},
'mtime': {
'pathpy': 'getmtime',
'os.path': 'getmtime',
},
'cwd': {
'pathpy': 'getcwd',
'os': 'getcwd',
},
'name': {
'os.path': 'basename',
'pathpy': 'basename',
},
'owner': {
'pathpy': 'get_owner',
},
'parent': {
'os.path': 'dirname',
'pathpy': ['parent', 'dirname'],
},
'stat': {
'pathlib': 'lstat',
},
'is_absolute': {
'pathlib': 'absolute',
'pathpy': 'isabs',
'os.path': 'isabs',
},
'is_file': {
'pathpy': 'isfile',
'os.path': 'isfile',
},
'is_dir': {
'pathpy': 'isdir',
'os.path': 'isdir',
},
'is_symlink': {
'pathpy': 'islink',
'os.path': 'islink',
},
'joinpath': {
'os.path': 'join'
},
'iterdir': {
'pathpy': 'listdir',
},
# TODO
}
def build_seealso(mappings=mappings):
"""
Kwargs:
        mappings (dict): ``{'pathlib': {'is_absolute': {'pathpy': 'isabs'}}}``
    Returns:
        dict[attrname] = {destattr: {modname: True}}
"""
seealso = {}
for mapsetname, mappingset in mappings.items():
        for attrname, attr_mappings in mappingset.items():
            for modname, destattrs in attr_mappings.items():
for destattr in maybe_list(destattrs):
seealso.setdefault(attrname, {}).setdefault(destattr, {}).setdefault(modname, True)
seealso.setdefault(destattr, {}).setdefault(attrname, {}).setdefault(mapsetname, True)
return seealso
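# For example (a sketch of the inversion above), a single entry
#   {'pathlib': {'is_absolute': {'pathpy': 'isabs'}}}
# becomes
#   {'is_absolute': {'isabs': {'pathpy': True}},
#    'isabs': {'is_absolute': {'pathlib': True}}}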
_Thing = collections.namedtuple('Thing',
('name', 'signature', 'docstring', 'source', 'iscallable', 'attr', 'obj'))
class Thing(_Thing):
pass
def get_signatures(obj, additional_attrs=None):
attrs = sorted(x for x in dir(obj) if not x.startswith('_'))
if additional_attrs:
attrs = sorted(attrs + additional_attrs)
for attrname in attrs:
try:
attr = getattr(obj, attrname)
except AttributeError:
continue
if inspect.isbuiltin(attr):
iscallable = True
try:
signature = inspect.signature(attr)
except ValueError:
signature = attr.__class__ #TODO
docstring = inspect.getdoc(attr)
# source = inspect.getsource(attr)
source = None
elif (inspect.isfunction(attr) or inspect.ismethod(attr)):
iscallable = True
signature = inspect.signature(attr)
docstring = inspect.getdoc(attr)
source = inspect.getsource(attr)
elif isinstance(attr, functools.partial):
iscallable = True
signature = inspect.signature(attr)
docstring = inspect.getdoc(attr)
source = None # inspect.getsource(attr)
else:
iscallable = False
signature = ''
docstring = inspect.getdoc(attr) # TODO
source = None
yield attrname, Thing(
name=attrname,
signature=signature,
docstring=docstring,
source=source,
iscallable=iscallable,
attr=attr,
obj=obj)
def build_methods():
methods = {}
methods['os'] = dict(get_signatures(_os))
methods['os.path'] = dict(get_signatures(_ospath))
methods['shutil'] = dict(get_signatures(_shutil))
methods['pathlib'] = dict(get_signatures(_pathlib.Path))
methods['pathpy'] = dict(get_signatures(_path.Path,
['__div__', '__rdiv__']))
# methods['trio'] = dict(get_signatures(_trio.Path))
return methods
def build_sets(methods):
sets = {}
sets['union'] = (
set(methods['pathlib'])
.union(methods['pathpy'])
#.union(methods['os'])
.union(methods['os.path'])
#.union(methods['shutil'])
)
sets['union'].difference_update((
'sys',
'supports_unicode_filenames',
'genericpath',
#'sameopenfile',
#'samestat',
#'extsep',
#'pathsep',
))
sets['union'] = sorted(sets['union'])
sets['pathlib_and_pathpy'] = sorted(
set(methods['pathlib']).intersection(methods['pathpy']))
sets['pathlib_not_pathpy'] = sorted(
set(methods['pathlib']).difference(methods['pathpy']))
sets['pathpy_not_pathlib'] = sorted(
set(methods['pathpy']).difference(methods['pathlib']))
# sets['pathlib_not_trio'] = sorted(
# set(methods['pathlib']).difference(methods['trio']))
return sets
methods = build_methods()
sets = build_sets(methods=methods)
def print_report_header():
print('')
print('==================================')
print('Python file methods and attributes')
print('==================================')
print('')
print('- Objective: Identify and compare Python file '
'functions/methods and attributes from '
'os, os.path, shutil, pathlib, path.py, and trio')
print('- Source: https://github.com/westurner/pyfilemods')
print('- Docs: https://westurner.github.io/pyfilemods/')
print('')
print('Contents')
print('++++++++')
print('.. contents::')
print('')
print_header__modules()
print_report_header()
def print_table(sets=sets, methods=methods):
hdr = '================== == ======= ====== ======= ======='
print(hdr)
print('attr os os.path shutil pathlib path.py')
print(hdr)
for attr in sets['union']:
print('%-18s %-2s %-7s %-6s %-8s %-7s' % (
'`%s`_' % attr,
'X' if attr in methods['os'] else ' ',
'X' if attr in methods['os.path'] else ' ',
'X' if attr in methods['shutil'] else ' ',
'X' if attr in methods['pathlib'] else ' ',
'X' if attr in methods['pathpy'] else ' ',
#'X' if attr in methods['trio'] else ' '
))
print(hdr)
print('')
print('Sets')
print('++++')
print('')
print('attr table')
print('==========')
print('')
print_table(sets=sets, methods=methods)
def print_thing(varname, sets=sets):
print(varname)
print('='*len(varname))
_var = sets[varname]
for x in _var:
print('- `%s`_' % x)
setnames = [
'pathlib_and_pathpy',
'pathlib_not_pathpy',
'pathpy_not_pathlib',
#'pathlib_not_trio',
]
for x in setnames:
print_thing(x, sets=sets)
print('')
def indent(text, n, char=' '):
if not text:
return text
return textwrap.indent(text, char*n)
print('')
print('attrs')
print('+++++')
print('')
def print_code(obj, attr):
_attr = getattr(obj, attr)
if obj and _attr:
print('')
print('.. code:: python')
#print(' :class: highlight')
print('')
print(indent(_attr, 4))
print('')
def fmtsignature(obj):
if obj is None:
return '``None``'
if obj.iscallable:
if not obj.signature:
return ' '
return '``%s``' % (re.sub(
r'<function (.*?) at 0x[\da-f]+>',
r'<function \1 at 0x...>',
str(obj.signature), 1))
else:
return '*attribute*'
modnames = ['os', 'os.path', 'shutil', 'pathlib', 'pathpy'] # , 'trio']
def print_attr_methods(sets=sets, methods=methods, modnames=modnames):
seealso = build_seealso(mappings=mappings)
for method in sets['union']:
methodstr = '``{}``'.format(method)
print(methodstr)
print('=' * (len(methodstr)+1))
attrs = {}
for modname in modnames:
attrs[modname] = methods[modname].get(method)
for name in modnames:
obj = attrs[name]
if obj and obj.signature:
print('| **%s.%s**\ %s' % (name, method, fmtsignature(obj)))
print('')
_seealso = seealso.get(method, {})
if _seealso:
seealsostrs = {m: list() for m in modnames}
for methodname, mods in _seealso.items():
for mod in mods:
seealsostrs[mod].append(
'`%s <#%s>`_' % (
'%s.%s' % (mod, methodname),
methodname.replace('_', '-').strip('-')))
print('| seealso: %s' % ', '.join(
itertools.chain.from_iterable(
(sorted(v) for v in seealsostrs.values()))))
print(' ')
for modname in modnames:
metadata = meta[modname]
obj = attrs[modname]
if obj:
print('| **%s.%s**%s:' % (
modname, method,
('\ %s' % fmtsignature(obj) if obj and obj.signature
else '')))
source_links = [
'`source (%s) <%s>`__' % (_ospath.basename(l), l)
for l in maybe_list(metadata['source'])]
print('| `docs <%s%s>`__ %s' % (
metadata['docsbaseurl'],
method,
' '.join(source_links)))
if obj.source:
print_code(obj, 'source')
else:
print_code(obj, 'docstring')
print('')
print_attr_methods(sets=sets, methods=methods)
if __name__ == '__main__':
import sys
if '-i' in sys.argv:
import ipdb
ipdb.set_trace()
|
py
|
1a5b2233e8ef6f1e9a6b11100ffc4418cf0dcb44
|
# qubit number=5
# total number=44
import cirq
import qiskit
from qiskit import IBMQ
from qiskit.providers.ibmq import least_busy
from qiskit import QuantumCircuit, QuantumRegister, ClassicalRegister
from qiskit import BasicAer, execute, transpile
from pprint import pprint
from qiskit.test.mock import FakeVigo
from math import log2,floor, sqrt, pi
import numpy as np
import networkx as nx
def build_oracle(n: int, f) -> QuantumCircuit:
# implement the oracle O_f^\pm
# NOTE: use U1 gate (P gate) with \lambda = 180 ==> CZ gate
# or multi_control_Z_gate (issue #127)
controls = QuantumRegister(n, "ofc")
oracle = QuantumCircuit(controls, name="Zf")
for i in range(2 ** n):
rep = np.binary_repr(i, n)
if f(rep) == "1":
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.h(controls[n])
if n >= 2:
oracle.mcu1(pi, controls[1:], controls[0])
for j in range(n):
if rep[j] == "0":
oracle.x(controls[j])
# oracle.barrier()
return oracle
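# Usage sketch (illustrative, not part of the generated benchmark): an oracle that
# flips the phase of |00> only, appended to a fresh 2-qubit circuit.
#   zf = build_oracle(2, lambda rep: str(int(rep == "00")))
#   circ = QuantumCircuit(QuantumRegister(2, "q"))
#   circ.append(zf.to_gate(), circ.qubits)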
def make_circuit(n:int,f) -> QuantumCircuit:
# circuit begin
input_qubit = QuantumRegister(n,"qc")
classical = ClassicalRegister(n, "qm")
prog = QuantumCircuit(input_qubit, classical)
prog.h(input_qubit[0]) # number=3
prog.h(input_qubit[1]) # number=4
prog.h(input_qubit[2]) # number=5
prog.h(input_qubit[3]) # number=6
prog.h(input_qubit[4]) # number=21
prog.cx(input_qubit[3],input_qubit[0]) # number=32
prog.z(input_qubit[3]) # number=33
prog.cx(input_qubit[3],input_qubit[0]) # number=34
prog.rx(0.11938052083641225,input_qubit[1]) # number=36
Zf = build_oracle(n, f)
repeat = floor(sqrt(2 ** n) * pi / 4)
for i in range(repeat):
prog.append(Zf.to_gate(), [input_qubit[i] for i in range(n)])
prog.h(input_qubit[0]) # number=1
prog.rx(1.4765485471872026,input_qubit[2]) # number=35
prog.h(input_qubit[1]) # number=2
prog.h(input_qubit[2]) # number=7
prog.h(input_qubit[3]) # number=8
prog.cx(input_qubit[1],input_qubit[0]) # number=41
prog.x(input_qubit[0]) # number=42
prog.cx(input_qubit[1],input_qubit[0]) # number=43
prog.x(input_qubit[4]) # number=30
prog.x(input_qubit[1]) # number=10
prog.x(input_qubit[2]) # number=11
prog.rx(0.45238934211692994,input_qubit[3]) # number=38
prog.y(input_qubit[1]) # number=39
prog.rx(-2.5258404934861938,input_qubit[1]) # number=25
prog.h(input_qubit[3]) # number=29
prog.cx(input_qubit[0],input_qubit[3]) # number=22
prog.x(input_qubit[3]) # number=23
prog.cx(input_qubit[0],input_qubit[3]) # number=24
if n>=2:
prog.mcu1(pi,input_qubit[1:],input_qubit[0])
prog.x(input_qubit[0]) # number=13
prog.rx(-0.0722566310325653,input_qubit[4]) # number=37
prog.x(input_qubit[1]) # number=14
prog.cx(input_qubit[0],input_qubit[2]) # number=26
prog.x(input_qubit[2]) # number=27
prog.h(input_qubit[4]) # number=40
prog.cx(input_qubit[0],input_qubit[2]) # number=28
prog.x(input_qubit[3]) # number=16
prog.h(input_qubit[0]) # number=17
prog.h(input_qubit[1]) # number=18
prog.h(input_qubit[2]) # number=19
prog.h(input_qubit[3]) # number=20
# circuit end
for i in range(n):
prog.measure(input_qubit[i], classical[i])
return prog
if __name__ == '__main__':
key = "00000"
f = lambda rep: str(int(rep == key))
prog = make_circuit(5,f)
IBMQ.load_account()
provider = IBMQ.get_provider(hub='ibm-q')
provider.backends()
backend = least_busy(provider.backends(filters=lambda x: x.configuration().n_qubits >= 2 and not x.configuration().simulator and x.status().operational == True))
sample_shot =7924
info = execute(prog, backend=backend, shots=sample_shot).result().get_counts()
backend = FakeVigo()
circuit1 = transpile(prog,backend,optimization_level=2)
writefile = open("../data/startQiskit_QC1353.csv","w")
print(info,file=writefile)
print("results end", file=writefile)
print(circuit1.depth(),file=writefile)
print(circuit1,file=writefile)
writefile.close()
|
py
|
1a5b2269e6087ccf71743da6ae31229b4bafa3f4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : 河北雪域网络科技有限公司 A.Star
# @contact: [email protected]
# @site:
# @file: setup.py
# @time: 2018/7/11 15:13
# @Software: PyCharm
from setuptools import setup, find_packages
from astartool.setuptool import load_install_requires
from snowland import __version__
setup(
name='snowland-image',
version=__version__,
description=(
'toolkit for image'
),
long_description=open('README.rst', encoding='utf-8').read(),
author='A.Star',
author_email='[email protected]',
maintainer='A.Star',
maintainer_email='[email protected]',
license='BSD License',
packages=find_packages(),
platforms=["all"],
url='https://github.com/AAFun/scikit-snowland',
classifiers=[
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: Implementation',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Topic :: Software Development :: Libraries'
],
install_requires=load_install_requires(),
extras_require={
'gis_tool': load_install_requires("optional-requirements-qgis.txt"),
'database_tool': load_install_requires("optional-requirements-database.txt")
}
)
|
py
|
1a5b22cfa440f01590a5c1438cec7decfbfb9847
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# Based on
# https://github.com/tensorflow/models/blob/master/research/struct2depth/model.py#L625-L641
class DepthSmoothnessLoss(nn.Module):
r"""Criterion that computes image-aware depth smoothness loss.
.. math::
\text{loss} = \left | \partial_x d_{ij} \right | e^{-\left \|
\partial_x I_{ij} \right \|} + \left |
\partial_y d_{ij} \right | e^{-\left \| \partial_y I_{ij} \right \|}
Shape:
- Depth: :math:`(N, 1, H, W)`
- Image: :math:`(N, 3, H, W)`
- Output: scalar
Examples::
>>> depth = torch.rand(1, 1, 4, 5)
>>> image = torch.rand(1, 3, 4, 5)
>>> smooth = tgm.losses.DepthSmoothnessLoss()
>>> loss = smooth(depth, image)
"""
def __init__(self) -> None:
super(DepthSmoothnessLoss, self).__init__()
@staticmethod
def gradient_x(img: torch.Tensor) -> torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :, :-1] - img[:, :, :, 1:]
@staticmethod
def gradient_y(img: torch.Tensor) -> torch.Tensor:
assert len(img.shape) == 4, img.shape
return img[:, :, :-1, :] - img[:, :, 1:, :]
def forward(self, depth: torch.Tensor, image: torch.Tensor) -> torch.Tensor:
if not torch.is_tensor(depth):
raise TypeError("Input depth type is not a torch.Tensor. Got {}"
.format(type(depth)))
if not torch.is_tensor(image):
raise TypeError("Input image type is not a torch.Tensor. Got {}"
.format(type(image)))
if not len(depth.shape) == 4:
raise ValueError("Invalid depth shape, we expect BxCxHxW. Got: {}"
.format(depth.shape))
if not len(image.shape) == 4:
raise ValueError("Invalid image shape, we expect BxCxHxW. Got: {}"
.format(image.shape))
        if not depth.shape[-2:] == image.shape[-2:]:
            raise ValueError("depth and image shapes must be the same. Got: {} and {}"
                             .format(depth.shape, image.shape))
        if not depth.device == image.device:
            raise ValueError(
                "depth and image must be in the same device. Got: {} and {}".format(
                    depth.device, image.device))
        if not depth.dtype == image.dtype:
            raise ValueError(
                "depth and image must be in the same dtype. Got: {} and {}".format(
                    depth.dtype, image.dtype))
# compute the gradients
depth_dx: torch.Tensor = self.gradient_x(depth)
depth_dy: torch.Tensor = self.gradient_y(depth)
image_dx: torch.Tensor = self.gradient_x(image)
image_dy: torch.Tensor = self.gradient_y(image)
# compute image weights
weights_x: torch.Tensor = torch.exp(
-torch.mean(torch.abs(image_dx), dim=1, keepdim=True))
weights_y: torch.Tensor = torch.exp(
-torch.mean(torch.abs(image_dy), dim=1, keepdim=True))
# apply image weights to depth
smoothness_x: torch.Tensor = torch.abs(depth_dx * weights_x)
smoothness_y: torch.Tensor = torch.abs(depth_dy * weights_y)
return torch.mean(smoothness_x) + torch.mean(smoothness_y)
######################
# functional interface
######################
def depth_smoothness_loss(
depth: torch.Tensor,
image: torch.Tensor) -> torch.Tensor:
r"""Computes image-aware depth smoothness loss.
See :class:`~torchgeometry.losses.DepthSmoothnessLoss` for details.
"""
return DepthSmoothnessLoss()(depth, image)
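# A minimal usage sketch of the functional interface (illustrative only; the random
# tensors and the __main__ guard below are assumptions, not part of the original module).
if __name__ == "__main__":
    _depth = torch.rand(1, 1, 4, 5)
    _image = torch.rand(1, 3, 4, 5)
    _criterion = DepthSmoothnessLoss()
    # The module and the functional wrapper compute the same scalar loss.
    assert torch.allclose(_criterion(_depth, _image), depth_smoothness_loss(_depth, _image))
    print(depth_smoothness_loss(_depth, _image))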
|
py
|
1a5b2337c70dc41115eddef21a3dfffd7f2d6c9a
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 07 13:30:00 2020
@author: Alan J.X. Guo
"""
import argparse
import scipy.io as sio
import numpy as np
import random
import os
# os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"]="3"
import sys
sys.path.append('./VCA')
from VCA import vca
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Input, Dense, Softmax, Conv1D, Flatten, Add, MaxPooling1D
from tensorflow.keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau, EarlyStopping
class En_De(keras.layers.Layer):
def __init__(self, endmembers_init, **kwargs):
self.emb_init = np.copy(endmembers_init)
self.channels = self.emb_init.shape[-1]
super(En_De, self).__init__(**kwargs)
def build(self, input_shape):
self.emb_wt = self.add_weight(name='emb_wt',
shape=self.emb_init.shape,
initializer=tf.constant_initializer(self.emb_init),
trainable=True)
super(En_De, self).build(input_shape) # Be sure to call this at the end
def call(self, inputs):
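        # Returns two tensors: the linear mixture (abundances x endmembers) and the
        # per-endmember contributions expanded along a new axis for the decoder branch.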
return [K.dot(inputs,self.emb_wt),tf.einsum('ij,jk->ijk',inputs,self.emb_wt)]
def compute_output_shape(self, input_shape):
return [(input_shape[0], self.channels),(input_shape[0],input_shape[1],self.channels)]
parser = argparse.ArgumentParser()
parser.add_argument('-p','--path',
help='Path of HSI datamat')
parser.add_argument('-k','--key',
default=None,
help='key of the HSI tensor in the matlab datamat, valid when using *.mat file')
args = parser.parse_args()
if os.path.splitext(args.path)[-1] == '.npy':
print('load {0}.'.format(args.path))
data_mat = np.load(args.path)
elif os.path.splitext(args.path)[-1] == '.mat':
print('load {0} from {1}.'.format(args.key,args.path))
data_mat = sio.loadmat(args.path)
assert args.key in data_mat
data_mat = data_mat[args.key]
def abs_softmax(x):
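    # Normalized absolute values (|x| / sum|x|) along the last axis; a sketch of the
    # abundance non-negativity and sum-to-one constraints used in spectral unmixing.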
return tf.math.abs(x)/tf.math.reduce_sum(tf.math.abs(x),
axis=-1,
keepdims=True)
R = 16
CHANNELS = data_mat.shape[-1]
LAMBDA = 0.5
EPOCHS = 200
BATCH_SIZE = 256
vca_x = (data_mat.reshape(-1,CHANNELS).T-np.min(data_mat))/np.max(data_mat)
endmembers, no, reconstruct = vca(vca_x,R)
inputs = Input(shape=(CHANNELS,1))
e1 = Conv1D(512,3,data_format='channels_last',use_bias=True,activation='relu')(inputs)
e2 = Conv1D(128,3,data_format='channels_last',use_bias=True,activation='relu')(e1)
e2 = Flatten()(e2)
e3 = Dense(R,activation=abs_softmax)(e2)
ende = En_De(endmembers.T)
de, de_spand = ende(e3)
d1 = Conv1D(256,1,data_format='channels_first',use_bias=True, activation='relu')(de_spand)
d2 = Conv1D(256,1,data_format='channels_first',use_bias=True, activation='relu')(d1)
d3 = Conv1D(16,1,data_format='channels_first',use_bias=True, activation='relu')(d2)
d4 = Conv1D(16,1,data_format='channels_first',use_bias=True, activation='relu')(d3)
d5 = Conv1D(1,1,data_format='channels_first',use_bias=True, activation='linear')(d4)
d5 = Flatten()(d5)
output = Add()([d5*(1-LAMBDA),de*LAMBDA])
autoencoder = keras.models.Model(inputs=inputs, outputs=output)
ae_x = np.copy(vca_x.T)
np.random.shuffle(ae_x)
ae_x = ae_x[:,:,np.newaxis]
optimizer = keras.optimizers.Adam(lr=0.001)
ende.trainable = True
autoencoder.compile(optimizer, loss='mean_squared_error')
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
cooldown=0,
patience=30,
monitor='loss',
min_delta=1e-8,
min_lr=1e-6,verbose=True)
earlystopping = EarlyStopping(monitor='loss', min_delta=1e-8, patience=50,
verbose=1, mode='auto', baseline=None,
restore_best_weights=True)
callbacks = [lr_reducer, earlystopping]
history = autoencoder.fit(x=[ae_x],y=[ae_x],batch_size=BATCH_SIZE, epochs=EPOCHS, verbose=1, callbacks=callbacks,
shuffle=True)
re = autoencoder.predict(ae_x,batch_size=1024)
diff = re - ae_x.reshape(re.shape)
print("reconstruction error: {0}".format(np.mean(np.mean(np.square(diff),axis=1))))
encoder = keras.models.Model(inputs=inputs, outputs=e3)
abundance = encoder.predict(x=[ae_x],batch_size=1024)
shape = list(data_mat.shape)
shape[-1] = R
abundance = abundance.reshape(shape)
save_path = os.path.splitext(args.path)[0] + '_abundance.npy'
np.save(save_path,abundance)
print('abundance saved to {0}.'.format(save_path))
|
py
|
1a5b250374e082184b53cb2f8113c65050ccc315
|
#!/usr/bin/env python
from django.conf.urls import patterns, url
urlpatterns = patterns(
'pyhn.apps.account.views',
url(r'^$', 'index', name='index'),
url(r'^login/$', 'login', name='login'),
)
urlpatterns += patterns(
'django.contrib.auth.views',
url(r'^logout/$', 'logout', {'next_page': '/'}, 'logout'),
)
|
py
|
1a5b2522c67059cd05dff0b19350cfe53e0a9c05
|
__description__ = 'Devo Python Library.'
__url__ = 'http://www.devo.com'
__version__ = "3.4.1"
__author__ = 'Devo'
__author_email__ = '[email protected]'
__license__ = 'MIT'
__copyright__ = 'Copyright 2020 Devo'
|
py
|
1a5b2578225fb83701168836f952b43dd833f8b2
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import re
import unittest
import onnx.backend.test
from onnx import defs
from onnx_tf import opset_version
from onnx_tf.backend import TensorflowBackend
from onnx_tf.common.legacy import legacy_onnx_pre_ver
from onnx_tf.common.legacy import legacy_opset_pre_ver
def get_onnxtf_supported_ops():
return opset_version.backend_opset_version
def get_onnx_supported_ops():
onnx_ops_dict = {}
for schema in defs.get_all_schemas():
onnx_ops_dict[schema.name] = {
'version': schema.since_version,
'deprecated': schema.deprecated
}
return onnx_ops_dict
# This is a pytest magic variable to load extra plugins
pytest_plugins = 'onnx.backend.test.report',
backend_test = onnx.backend.test.runner.Runner(TensorflowBackend, __name__)
# The test cases excluded below should be considered permanent restrictions
# based on the TensorFlow implementation. Unimplemented operators will raise
# a BackendIsNotSupposedToImplementIt exception so that their test cases
# will pass and show a verbose message stating it was effectively skipped.
# https://github.com/onnx/onnx/issues/349
backend_test.exclude(r'[a-z,_]*GLU[a-z,_]*')
# TF does not support dilation and strides at the same time:
# Will produce strides > 1 not supported in conjunction with dilation_rate > 1
backend_test.exclude(r'[a-z,_]*dilated_strided[a-z,_]*')
backend_test.exclude(r'[a-z,_]*Conv2d_dilated[a-z,_]*')
# TF does not have column major max_pool_with_argmax
backend_test.exclude(
r'[a-z,_]*maxpool_with_argmax_2d_precomputed_strides[a-z,_]*')
# PRelu OnnxBackendPyTorchConvertedModelTest has wrong dim for broadcasting
backend_test.exclude(r'[a-z,_]*PReLU_[0-9]d_multiparam[a-z,_]*')
# TF does not support int8, int16, uint8, uint16, uint32, uint64 for
# tf.floormod and tf.truncatemod
backend_test.exclude(r'test_mod_[a-z,_]*uint[0-9]+')
backend_test.exclude(r'test_mod_[a-z,_]*int(8|(16))+')
# TF doesn't support most of the attributes in resize op
# test_node.py will cover the test
backend_test.exclude(r'test_resize_[a-z,_]*')
# range is using loop in the model test but all the outputs datatype are
# missing in the body attribute of the loop
backend_test.exclude(r'test_range_float_type_positive_delta_expanded[a-z,_]*')
backend_test.exclude(r'test_range_int32_type_negative_delta_expanded[a-z,_]*')
# skip all the cumsum testcases because all the axis in the testcases
# are created as a 1-D 1 element tensor, but the spec clearly state
# that axis should be a 0-D tensor(scalar)
if legacy_opset_pre_ver(13):
backend_test.exclude(r'test_cumsum_[a-z,_]*')
# Currently ONNX's backend test runner does not support sequence as input/output
backend_test.exclude(r'test_if_seq[a-z,_]*')
# TF session run does not support sequence/RaggedTensor as model inputs
backend_test.exclude(r'test_loop13_seq[a-z,_]*')
# TF minimum/maximum do not support uint64 when auto-cast is False (default)
backend_test.exclude(r'test_min_uint64_[a-z,_]*')
backend_test.exclude(r'test_max_uint64_[a-z,_]*')
if legacy_opset_pre_ver(7):
backend_test.exclude(r'[a-z,_]*Upsample[a-z,_]*')
if 'TRAVIS' in os.environ:
backend_test.exclude('test_vgg19')
backend_test.exclude('zfnet512')
if legacy_onnx_pre_ver(1, 2):
# These following tests fails by a tiny margin with onnx<1.2:
backend_test.exclude('test_operator_add_broadcast_cpu')
backend_test.exclude('test_operator_add_size1_broadcast_cpu')
backend_test.exclude('test_operator_add_size1_right_broadcast_cpu')
backend_test.exclude('test_operator_add_size1_singleton_broadcast_cpu')
backend_test.exclude('test_averagepool_3d_default_cpu')
# Do not support consumed flag:
backend_test.exclude('test_batch_normalization')
# Do not support RNN testing on onnx<1.2 due to incorrect tests:
backend_test.exclude(r'test_operator_rnn_cpu')
backend_test.exclude(r'test_operator_lstm_cpu')
backend_test.exclude(r'test_operator_rnn_single_layer_cpu')
# The onnx test for cast, float to string, does not work
if not legacy_opset_pre_ver(9):
backend_test.exclude(r'[a-z,_]*cast[a-z,_]*')
if not legacy_opset_pre_ver(10):
# Do not support dilations != 1 for ConvTranspose, test is added in opset 10
backend_test.exclude(r'[a-z,_]*convtranspose_dilations[a-z,_]*')
# Concat from sequence with new_axis=1 not supported
backend_test.exclude(r'test_sequence_model5_[a-z,_]*')
# Fails rounding tolerance
backend_test.exclude(r'test_gru_seq_length_[a-z,_]*')
# TF pow does not support uint64 when auto-cast is False (default)
backend_test.exclude(r'test_pow_types_float[0-9]+_uint64+_[a-z,_]*')
# TF session run does not support sequence/RaggedTensor as model inputs
backend_test.exclude(r'test_sequence_insert+_[a-z,_]*')
# Exclude tests for Dropout training that have randomness dependent on
# the different implementations
backend_test.exclude('test_training_dropout_default_[a-z,_]*')
backend_test.exclude('test_training_dropout_[a-z,_]*')
backend_test.exclude('test_training_dropout_default_mask_[a-z,_]*')
backend_test.exclude('test_training_dropout_mask_[a-z,_]*')
# TF module can't run gru, lstm, rnn in one session using custom variables
backend_test.exclude(r'test_gru_[a-z,_]*')
backend_test.exclude(r'test_lstm_[a-z,_]*')
backend_test.exclude(r'test_rnn_[a-z,_]*')
backend_test.exclude(r'test_simple_rnn_[a-z,_]*')
# TF doesn't support auto_pad=SAME_LOWER for Conv and ConvTranspose
backend_test.exclude(r'test_conv_with_autopad_same_[a-z,_]*')
backend_test.exclude(r'test_convtranspose_autopad_same_[a-z,_]*')
# Exclude non-deterministic tests
backend_test.exclude(r'test_bernoulli_expanded[a-z,_]*')
backend_test.exclude(r'test_bernoulli_double_expanded[a-z,_]*')
backend_test.exclude(r'test_bernoulli_seed_expanded[a-z,_]*')
# Exclude optional_get_element, test_optional_has_element tests
backend_test.exclude(r'test_optional_get_element[a-z,_]*')
backend_test.exclude(r'test_optional_has_element[a-z,_]*')
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test.enable_report().test_cases)
if __name__ == '__main__':
unittest.main()
|
py
|
1a5b25b61b25f24678955482d10d18ed1a699d38
|
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
import sys
import stylesheet
import yaml
from random import shuffle
import meal
from meal import Meal
from functools import partial
import logging
from imp import reload
reload(meal)
class Window(QDialog):
days = ['Sunday',
'Monday',
'Tuesday',
'Wednesday',
'Thursday',
'Friday',
'Saturday']
path = 'meals2.yaml'
def __init__(self):
super(Window, self).__init__()
self.setWindowFlags(Qt.WindowMinimizeButtonHint | Qt.WindowMinMaxButtonsHint)
self.setStyleSheet(stylesheet.main())
self.createUI()
self.createMeals()
self.resize(1200, 600)
def createUI(self):
self.mealUI = {}
self.meals = []
mainLayout = QVBoxLayout()
daysLayout = QHBoxLayout()
self.create = QPushButton('Create Meal Plan')
self.create.clicked.connect(self.createMenu)
for day in self.days:
label = QLabel(day)
self.mealUI[day] = QTextEdit()
refresh = QPushButton('Refresh')
healthier = QPushButton('Healthier')
healthier.clicked.connect(partial(self.healthier, day))
easier = QPushButton('Easier')
easier.clicked.connect(partial(self.easier, day))
layout = QVBoxLayout()
layout.addWidget(label)
layout.addWidget(self.mealUI[day])
layout.addWidget(refresh)
layout.addWidget(healthier)
layout.addWidget(easier)
daysLayout.addLayout(layout)
mainLayout.addWidget(self.create)
mainLayout.addLayout(daysLayout)
self.setLayout(mainLayout)
def createMeals(self):
with open(self.path, 'r') as f:
data = yaml.safe_load(f)
for item in data:
meal = Meal(item['name'])
meal.set_protein(item['protein'])
meal.set_health(item['health'])
meal.set_frequency(item['frequency'])
meal.set_difficulty(item['difficulty'])
self.meals.append(meal)
def createMenu(self):
# Generate list of meals
masterList = []
for meal in self.meals:
# Assign Multiples by Frequency
for i in range(meal.get_frequency()):
masterList.append(meal)
# Randomize List
shuffle(masterList)
        # Get First 7 Meals
weeklyMeals = []
i = 0
fish = False
for meal in masterList:
if i == 7:
break
if meal not in weeklyMeals:
# Only 1 Fish a Week
if meal.get_protein() == 'Fish':
if not fish:
fish = True
weeklyMeals.append(meal)
i += 1
else:
weeklyMeals.append(meal)
i += 1
# Add to UI
i=0
for day in self.days:
self.mealUI[day].setText(weeklyMeals[i].get_name())
i+=1
def refresh(self, day):
pass
def healthier(self, day):
origMeals = {}
for d in self.days:
origMeals[d] = [m for m in self.meals if m.get_name() == self.mealUI[d].toPlainText()][0]
origMeal = origMeals[day]
# Generate list of meals
masterList = []
for meal in self.meals:
# Assign Multiples by Frequency
for i in range(meal.get_frequency()):
masterList.append(meal)
# Randomize List
shuffle(masterList)
        masterList = [m for m in masterList if m.get_health() > origMeal.get_health() and m not in origMeals.values()]
if not masterList:
logging.error('No Healthier Meal Found')
return
self.mealUI[day].clear()
self.mealUI[day].setText(masterList[0].get_name())
def easier(self, day):
origMeals = {}
for d in self.days:
origMeals[d] = [m for m in self.meals if m.get_name() == self.mealUI[d].toPlainText()][0]
origMeal = origMeals[day]
# Generate list of meals
masterList = []
for meal in self.meals:
# Assign Multiples by Frequency
for i in range(meal.get_frequency()):
masterList.append(meal)
# Randomize List
shuffle(masterList)
        masterList = [m for m in masterList if m.getDifficulty() < origMeal.getDifficulty() and m not in origMeals.values()]
if not masterList:
logging.error('No Easier Meal Found')
return
self.mealUI[day].clear()
self.mealUI[day].setText(masterList[0].get_name())
if __name__ == '__main__':
app = QApplication(sys.argv)
window = Window()
window.show()
sys.exit(app.exec_())
|
py
|
1a5b2608c183c4d1de6163342ef7d9dc3f8671de
|
__author__ = 'bmiller'
'''
This is the start of something that behaves like
the unittest module from cpython.
'''
import re
class _AssertRaisesContext(object):
"""A context manager used to implement TestCase.assertRaises* methods."""
def __init__(self, expected, test_case):
self.test_case = test_case
self.expected = expected
self.exception = None
def _is_subtype(self, expected, basetype):
if isinstance(expected, tuple):
return all(self._is_subtype(e, basetype) for e in expected)
return isinstance(expected, type) and issubclass(expected, basetype)
def handle(self, args, kwargs):
"""
If args is empty, assertRaises is being used as a
context manager, so return self.
If args is not empty, call a callable passing positional and keyword
arguments.
"""
try:
if not self._is_subtype(self.expected, BaseException):
raise TypeError('assertRaises() arg 1 must be an exception type or tuple of exception types')
if not args:
return self
callable_obj = args[0]
args = args[1:]
with self:
callable_obj(*args, **kwargs)
finally:
# bpo-23890: manually break a reference cycle
self = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
res = True
feedback = ""
self.exception = exc_value
try:
act_exc = exc_type.__name__
except AttributeError:
act_exc = str(exc_type)
try:
exp_exc = self.expected.__name__
except AttributeError:
exp_exc = str(self.expected)
if exc_type is None:
res = False
feedback = "{} not raised".format(exp_exc)
elif not issubclass(exc_type, self.expected):
res = False
feedback = "Expected {} but got {}".format(exp_exc, act_exc)
self.test_case.appendResult(res, act_exc, exp_exc, feedback)
return True
class TestCase(object):
def __init__(self):
self.numPassed = 0
self.numFailed = 0
self.assertPassed = 0
self.assertFailed = 0
self.verbosity = 1
self.tlist = []
testNames = {}
for name in dir(self):
if name[:4] == 'test' and name not in testNames:
self.tlist.append(getattr(self,name))
testNames[name]=True
def setUp(self):
pass
def tearDown(self):
pass
def cleanName(self,funcName):
return funcName.__func__.__name__
def main(self):
for func in self.tlist:
if self.verbosity > 1:
print('Running %s' % self.cleanName(func))
try:
self.setUp()
self.assertPassed = 0
self.assertFailed = 0
func()
self.tearDown()
if self.assertFailed == 0:
self.numPassed += 1
else:
self.numFailed += 1
print('Tests failed in %s ' % self.cleanName(func))
except Exception as e:
self.assertFailed += 1
self.numFailed += 1
print('Test threw exception in %s (%s)' % (self.cleanName(func), e))
self.showSummary()
def assertEqual(self, actual, expected, feedback=""):
res = actual==expected
if not res and feedback == "":
feedback = "Expected %s to equal %s" % (str(actual),str(expected))
self.appendResult(res, actual ,expected, feedback)
def assertNotEqual(self, actual, expected, feedback=""):
res = actual != expected
if not res and feedback == "":
feedback = "Expected %s to not equal %s" % (str(actual),str(expected))
self.appendResult(res, actual, expected, feedback)
def assertTrue(self,x, feedback=""):
res = bool(x) is True
if not res and feedback == "":
feedback = "Expected %s to be True" % (str(x))
self.appendResult(res, x, True, feedback)
def assertFalse(self,x, feedback=""):
res = not bool(x)
if not res and feedback == "":
feedback = "Expected %s to be False" % (str(x))
self.appendResult(res, x, False, feedback)
def assertIs(self,a,b, feedback=""):
res = a is b
if not res and feedback == "":
feedback = "Expected %s to be the same object as %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertIsNot(self,a,b, feedback=""):
res = a is not b
if not res and feedback == "":
feedback = "Expected %s to not be the same object as %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertIsNone(self,x, feedback=""):
res = x is None
if not res and feedback == "":
feedback = "Expected %s to be None" % (str(x))
self.appendResult(res, x, None, feedback)
def assertIsNotNone(self,x, feedback=""):
res = x is not None
if not res and feedback == "":
feedback = "Expected %s to not be None" % (str(x))
self.appendResult(res, x, None, feedback)
def assertIn(self, a, b, feedback=""):
res = a in b
if not res and feedback == "":
feedback = "Expected %s to be in %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertNotIn(self, a, b, feedback=""):
res = a not in b
if not res and feedback == "":
feedback = "Expected %s to not be in %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertIsInstance(self,a,b, feedback=""):
res = isinstance(a,b)
if not res and feedback == "":
feedback = "Expected %s to be an instance of %s" % (str(a), str(b))
self.appendResult(res, a, b, feedback)
def assertNotIsInstance(self,a,b, feedback=""):
res = not isinstance(a,b)
if not res and feedback == "":
feedback = "Expected %s to not be an instance of %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertRegex(self, text, expected_regex, feedback=""):
"""Fail the test unless the text matches the regular expression."""
if isinstance(expected_regex, (str, )): #bytes
assert expected_regex, "expected_regex must not be empty."
expected_regex = re.compile(expected_regex)
if not expected_regex.search(text):
res = False
feedback = "Regex didn't match: %r not found in %r" % (
repr(expected_regex), text)
else:
res = True
self.appendResult(res, text, expected_regex, feedback)
def assertNotRegex(self, text, unexpected_regex, feedback=""):
"""Fail the test if the text matches the regular expression."""
if isinstance(unexpected_regex, (str, )): # bytes
unexpected_regex = re.compile(unexpected_regex)
match = unexpected_regex.search(text)
if match:
feedback = 'Regex matched: %r matches %r in %r' % (
text[match.start() : match.end()],
repr(unexpected_regex),
text)
# _formatMessage ensures the longMessage option is respected
self.appendResult(not bool(match), text, unexpected_regex, feedback)
def assertAlmostEqual(self, a, b, places=7, feedback="", delta=None):
if delta is not None:
res = abs(a-b) <= delta
else:
if places is None:
places = 7
res = round(a-b, places) == 0
if not res and feedback == "":
feedback = "Expected %s to equal %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertNotAlmostEqual(self, a, b, places=7, feedback="", delta=None):
if delta is not None:
res = not (a == b) and abs(a - b) > delta
else:
if places is None:
places = 7
res = round(a-b, places) != 0
if not res and feedback == "":
feedback = "Expected %s to not equal %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertGreater(self,a,b, feedback=""):
res = a > b
if not res and feedback == "":
feedback = "Expected %s to be greater than %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertGreaterEqual(self,a,b, feedback=""):
res = a >= b
if not res and feedback == "":
feedback = "Expected %s to be >= %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertLess(self, a, b, feedback=""):
res = a < b
if not res and feedback == "":
feedback = "Expected %s to be less than %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def assertLessEqual(self,a,b, feedback=""):
res = a <= b
if not res and feedback == "":
feedback = "Expected %s to be <= %s" % (str(a),str(b))
self.appendResult(res, a, b, feedback)
def appendResult(self,res,actual,expected,feedback):
if res:
msg = 'Pass'
self.assertPassed += 1
else:
msg = 'Fail: ' + feedback
print(msg)
self.assertFailed += 1
def assertRaises(self, expected_exception, *args, **kwargs):
context = _AssertRaisesContext(expected_exception, self)
try:
return context.handle(args, kwargs)
finally:
# bpo-23890: manually break a reference cycle
context = None
def fail(self, msg=None):
if msg is None:
msg = 'Fail'
else:
msg = 'Fail: ' + msg
print(msg)
self.assertFailed += 1
def showSummary(self):
        total = self.numPassed + self.numFailed
        pct = self.numPassed / total * 100 if total else 0.0
        print("Ran %d tests, passed: %d failed: %d (%.1f%% passed)\n" % (
            total, self.numPassed, self.numFailed, pct))
def main(verbosity=1):
glob = globals() # globals() still needs work
for name in glob:
if type(glob[name]) == type and issubclass(glob[name], TestCase):
try:
tc = glob[name]()
tc.verbosity = verbosity
tc.main()
except:
print("Uncaught Error in: ", name)
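# Minimal usage sketch (illustrative, not part of the original module): a TestCase
# subclass whose test* methods are discovered and run by main().
if __name__ == '__main__':
    class ExampleTests(TestCase):
        def testAddition(self):
            self.assertEqual(1 + 1, 2)
        def testMembership(self):
            self.assertIn(3, [1, 2, 3])
    main(verbosity=2)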
|
py
|
1a5b2709abfe6b1aa20e838a7bc4463ba3531185
|
from django.utils.translation import gettext as _
from enums_test import EnumsTest
class TestRecordingContext(EnumsTest):
entity = 'recording_context'
title = _("Contexte d’enregistrement")
data = [
{"id":"1", "name":"collectage", "notes":"collectage"},
{"id":"2", "name":"bal", "notes":"bal"},
{"id":"3", "name":"enquête", "notes":"enquête"},
]
new_data = {"name":"spectacle", "notes":"Notes spectacle"}
|
py
|
1a5b2728862dc7e39755b8abddb6070e46e2303f
|
#!/usr/bin/env python
"""give me some AFOS data please"""
from __future__ import print_function
import cgi
import unittest
from pyiem.util import get_dbconn, ssw
def pil_logic(s):
"""Convert the CGI pil value into something we can query
Args:
s (str): The CGI variable wanted
Returns:
      list of PILs to send to the database"""
if s == '':
return []
s = s.upper()
pils = []
if s.find(",") == -1:
pils.append(s)
else:
pils = s.split(",")
res = []
for pil in pils:
if pil[:3] == "WAR":
for q in ['FLS', 'FFS', 'AWW', 'TOR', 'SVR', 'FFW', 'SVS',
'LSR', 'SPS', 'WSW', 'FFA', 'WCN']:
res.append("%s%s" % (q, pil[3:6]))
else:
res.append("%6.6s" % (pil.strip() + ' ', ))
return res
def main():
"""Process the request"""
# Attempt to keep the file from downloading and just displaying in chrome
form = cgi.FieldStorage()
pils = pil_logic(form.getfirst('pil', ''))
try:
limit = int(form.getfirst('limit', 1))
except ValueError:
limit = 1
center = form.getfirst('center', '')[:4]
sdate = form.getfirst('sdate', '')[:10]
edate = form.getfirst('edate', '')[:10]
ttaaii = form.getfirst('ttaaii', '')[:6]
fmt = form.getfirst('fmt', 'text')
ssw("X-Content-Type-Options: nosniff\n")
if form.getfirst('dl') == "1":
ssw("Content-type: application/octet-stream\n")
ssw("Content-Disposition: attachment; filename=afos.txt\n\n")
else:
if fmt == 'text':
ssw("Content-type: text/plain\n\n")
elif fmt == 'html':
ssw("Content-type: text/html\n\n")
if not pils:
ssw("ERROR: No pil specified...")
return
centerlimit = '' if center == '' else (" and source = '%s' " % (center, ))
timelimit = ''
if sdate != '':
timelimit += " and entered >= '%s' " % (sdate, )
if edate != '':
timelimit += " and entered < '%s' " % (edate, )
if pils[0][:3] == 'MTR':
access = get_dbconn('iem', user='nobody')
cursor = access.cursor()
sql = """
SELECT raw from current_log c JOIN stations t
on (t.iemid = c.iemid)
WHERE raw != '' and id = '%s' ORDER by valid DESC LIMIT %s
""" % (pils[0][3:].strip(), limit)
cursor.execute(sql)
for row in cursor:
if fmt == 'html':
ssw("<pre>\n")
else:
ssw("\001\n")
ssw(row[0].replace("\r\r\n", "\n"))
if fmt == 'html':
ssw("</pre>\n")
else:
ssw("\003\n")
if cursor.rowcount == 0:
ssw("ERROR: METAR lookup for %s failed" % (
pils[0][3:].strip(), ))
return
try:
mydb = get_dbconn('afos', user='nobody')
except Exception as _exp: # noqa
ssw('Error Connecting to Database, please try again!\n')
return
cursor = mydb.cursor()
if len(pils) == 1:
pillimit = " pil = '%s' " % (pils[0], )
if len(pils[0].strip()) == 3:
pillimit = " substr(pil, 1, 3) = '%s' " % (pils[0].strip(), )
else:
pillimit = " pil in %s" % (tuple(pils), )
ttlimit = ''
if len(ttaaii) == 6:
ttlimit = " and wmo = '%s' " % (ttaaii, )
# Do optimized query first, see if we can get our limit right away
sql = """
SELECT data, pil,
to_char(entered at time zone 'UTC', 'YYYYMMDDHH24MI') as ts
from products WHERE %s
and entered > now() - '31 days'::interval %s %s %s
ORDER by entered DESC LIMIT %s""" % (pillimit, centerlimit,
timelimit, ttlimit, limit)
cursor.execute(sql)
if cursor.rowcount != limit:
sql = """
SELECT data, pil,
to_char(entered at time zone 'UTC', 'YYYYMMDDHH24MI') as ts
from products WHERE %s %s %s %s
ORDER by entered DESC LIMIT %s """ % (pillimit, centerlimit,
timelimit, ttlimit, limit)
cursor.execute(sql)
for row in cursor:
if fmt == 'html':
ssw((
"<a href=\"/wx/afos/p.php?pil=%s&e=%s\">Permalink</a> "
"for following product: "
) % (row[1], row[2]))
ssw("<br /><pre>\n")
else:
ssw("\001\n")
# Remove control characters from the product as we are including
# them manually here...
ssw((row[0]).replace(
"\003", "").replace("\001\r\r\n", "").replace("\r\r\n", "\n"))
if fmt == 'html':
ssw("</pre><hr>\n")
else:
ssw("\n\003\n")
if cursor.rowcount == 0:
print("ERROR: Could not Find: %s" % (",".join(pils), ))
if __name__ == '__main__':
main()
class TestRetrieve(unittest.TestCase):
"""some tests"""
def test_pil_logic(self):
"""Make sure our pil logic works! """
res = pil_logic("AFDDMX")
assert len(res) == 1
assert res[0] == 'AFDDMX'
res = pil_logic("WAREWX")
assert len(res) == 12
res = pil_logic("STOIA,AFDDMX")
assert res[0] == 'STOIA '
assert res[1] == 'AFDDMX'
|
py
|
1a5b29368e573af1c851edd0741e1a298fe71b00
|
registros = []
def make_album(artista, album, num_musicas=None):
dicionario = {"artista": artista, "album": album}
if num_musicas:
dicionario["num_musicas"] = num_musicas
return dicionario
def print_album(album):
if "num_musicas" in album:
print(f'Artista: {album["artista"]}, Album: {album["album"]}, Numero de Musicas: {album["num_musicas"]}')
else:
print(f'Artista: {album["artista"]}, Album: {album["album"]}')
while True:
artista = input("Qual o nome do artista? ")
album = input("Qual o nome do album? ")
    num_musica = input("Qual o número de músicas do álbum? Se não desejar adicionar um número, aperte \"Enter\". ")
    dic_album = make_album(artista, album, num_musica)
    print_album(dic_album)
    continuar = input("Você deseja adicionar mais um registro (s/n)? ")
    if continuar.lower() == "n":
        print("Até a próxima!!!")
break
|
py
|
1a5b294a3d81a1c7d0d34606b2cd80bf04b59dfa
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""optimizer"""
from typing import Iterable
import numpy as np
import mindspore
from mindspore.ops import functional as F, composite as C, operations as P
from mindspore.ops.operations import _inner_ops as inner
from mindspore.nn.cell import Cell
from mindspore.nn.layer.container import CellList
from mindspore.common.parameter import Parameter, ParameterTuple
from mindspore.common.initializer import initializer
from mindspore.common.tensor import Tensor, RowTensor
import mindspore.common.dtype as mstype
from mindspore._checkparam import Validator as validator
from mindspore import log as logger
from mindspore.parallel._utils import _get_global_rank, _get_device_num, _get_parallel_mode
from mindspore.context import ParallelMode
from mindspore import context
from mindspore.nn.learning_rate_schedule import LearningRateSchedule
__all__ = ['Optimizer']
class Optimizer(Cell):
"""
Base class for all optimizers.
Note:
This class defines the API to add Ops to train a model. Never use
this class directly, but instead instantiate one of its subclasses.
Different parameter groups can set different `learning_rate`, `weight_decay` and `grad_centralization`.
When separating parameter groups, the weight decay in each group will be applied on the parameters if the
        weight_decay is positive. For most optimizers, when not separating parameters, the `weight_decay` in the API will
be applied on the parameters without 'beta' or 'gamma' in their names if `weight_decay` is positive.
When separating parameter groups, if you want to centralize the gradient, set grad_centralization to True,
but the gradient centralization can only be applied to the parameters of the convolution layer.
        If the parameters of a non-convolution layer are set to True, an error will be reported.
To improve parameter groups performance, the customized order of parameters can be supported.
Args:
learning_rate (Union[float, Tensor, Iterable, LearningRateSchedule]): A value or a graph for the learning
rate. When the learning_rate is an Iterable or a Tensor in a 1D dimension, use dynamic learning rate, then
the i-th step will take the i-th value as the learning rate. When the learning_rate is LearningRateSchedule,
use dynamic learning rate, the i-th learning rate will be calculated during the process of training
according to the formula of LearningRateSchedule. When the learning_rate is a float or a Tensor in a zero
dimension, use fixed learning rate. Other cases are not supported. The float learning rate must be
equal to or greater than 0. If the type of `learning_rate` is int, it will be converted to float.
parameters (Union[list[Parameter], list[dict]]): When the `parameters` is a list of `Parameter` which will be
updated, the element in `parameters` must be class `Parameter`. When the `parameters` is a list of `dict`,
the "params", "lr", "weight_decay" and "order_params" are the keys can be parsed.
- params: Required. The value must be a list of `Parameter`.
- lr: Optional. If "lr" in the keys, the value of corresponding learning rate will be used.
If not, the `learning_rate` in the API will be used.
- weight_decay: Optional. If "weight_decay" in the keys, the value of corresponding weight decay
will be used. If not, the `weight_decay` in the API will be used.
- order_params: Optional. If "order_params" in the keys, the value must be the order of parameters and
the order will be followed in optimizer. There are no other keys in the `dict` and the parameters which
in the value of 'order_params' must be in one of group parameters.
- grad_centralization: Optional. The data type of "grad_centralization" is Bool. If "grad_centralization"
is in the keys, the set value will be used. If not, the `grad_centralization` is False by default.
This parameter only works on the convolution layer.
weight_decay (Union[float, int]): An int or a floating point value for the weight decay.
It must be equal to or greater than 0.
If the type of `weight_decay` input is int, it will be converted to float. Default: 0.0.
loss_scale (float): A floating point value for the loss scale. It must be greater than 0. If the
type of `loss_scale` input is int, it will be converted to float. Default: 1.0.
Raises:
TypeError: If `learning_rate` is not one of int, float, Tensor, Iterable, LearningRateSchedule.
TypeError: If element of `parameters` is neither Parameter nor dict.
TypeError: If `loss_scale` is not a float.
TypeError: If `weight_decay` is neither float nor int.
ValueError: If `loss_scale` is less than or equal to 0.
ValueError: If `weight_decay` is less than 0.
ValueError: If `learning_rate` is a Tensor, but the dimension of tensor is greater than 1.
Supported Platforms:
``Ascend`` ``GPU``
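    Examples:
        >>> # Illustrative sketch of the grouped-`parameters` format described above,
        >>> # assuming a hypothetical network `net` and a concrete subclass such as nn.Momentum.
        >>> conv_params = [p for p in net.trainable_params() if 'conv' in p.name]
        >>> other_params = [p for p in net.trainable_params() if 'conv' not in p.name]
        >>> group_params = [{'params': conv_params, 'weight_decay': 0.01, 'grad_centralization': True},
        ...                 {'params': other_params, 'lr': 0.01},
        ...                 {'order_params': net.trainable_params()}]
        >>> optim = nn.Momentum(group_params, learning_rate=0.1, momentum=0.9)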
"""
def __init__(self, learning_rate, parameters, weight_decay=0.0, loss_scale=1.0):
super(Optimizer, self).__init__(auto_prefix=False)
if parameters is not None and not isinstance(parameters, list):
parameters = list(parameters)
if not parameters:
raise ValueError("Optimizer got an empty parameter list.")
if not isinstance(parameters[0], (dict, Parameter)):
raise TypeError("Only a list of Parameter or dict can be supported.")
if isinstance(loss_scale, int):
loss_scale = float(loss_scale)
validator.check_value_type("loss_scale", loss_scale, [float], self.cls_name)
validator.check_positive_float(loss_scale, "loss_scale", self.cls_name)
self.loss_scale = loss_scale
weight_decay = self._preprocess_weight_decay(weight_decay)
self.grad_centralization = False
self._unique = True
self._target = context.get_context("device_target")
self.dynamic_lr = False
self.assignadd = None
self.global_step = None
self.is_group = False
self.is_group_lr = False
self.is_group_params_ordered = False
learning_rate = self._preprocess_single_lr(learning_rate)
if isinstance(parameters[0], dict):
self.is_group = True
self.group_params = []
self.group_lr = []
self.group_weight_decay = []
self.group_grad_centralization = []
self._init_group_params(parameters, learning_rate, weight_decay, self.grad_centralization)
        # The final value of dynamic_lr can only be determined after _preprocess_single_lr and _init_group_params run.
if self.dynamic_lr:
self.assignadd = P.AssignAdd()
self.global_step = Parameter(initializer(0, [1], mindspore.int32), name='global_step')
if self.is_group_lr:
self.learning_rate = CellList(self.group_lr, auto_prefix=False) if self.dynamic_lr \
else ParameterTuple(self.group_lr)
else:
self.learning_rate = self._build_single_lr(learning_rate, 'learning_rate')
if self.is_group:
self.parameters = ParameterTuple(self.group_params)
self.weight_decay = tuple(self.group_weight_decay)
self.weight_decay_tensor_tuple = tuple(Tensor(x, mstype.float32) for x in self.group_weight_decay)
decay_filter = lambda x: x > 0
self.decay_flags = tuple(decay_filter(x) for x in self.weight_decay)
self.exec_weight_decay = any(self.decay_flags)
self.grad_centralization_flags = tuple(self.group_grad_centralization)
else:
self.parameters = ParameterTuple(parameters)
self.weight_decay = weight_decay * loss_scale
self.weight_decay_tensor = Tensor(self.weight_decay, mstype.float32)
decay_filter = lambda x: 'beta' not in x.name and 'gamma' not in x.name
self.decay_flags = tuple(decay_filter(x) for x in self.parameters)
self.exec_weight_decay = self.weight_decay > 0
        # When a parameter has already been made unique, there is no need to do another unique in the optimizer.
for param in self.parameters:
if param.unique:
self._unique = False
break
ps_filter = lambda x: x.is_param_ps
self.ps_parameters = tuple(ps_filter(x) for x in self.parameters)
cache_filter = lambda x: x.cache_enable
self.cache_enable = tuple(cache_filter(x) for x in self.parameters)
self.reciprocal_scale = Tensor(1.0 / loss_scale, mstype.float32)
self.need_scale = loss_scale != 1.0
self.global_step_increase_tensor = Tensor(1, mstype.int32)
self.param_length = len(self.parameters)
self.map_ = C.Map()
self._use_parallel_optimizer()
def _use_parallel_optimizer(self):
"""Indicates whether to use automatic parallelism."""
if context.get_auto_parallel_context("enable_parallel_optimizer"):
if _get_parallel_mode() == ParallelMode.DATA_PARALLEL and context.get_context("device_target") == "Ascend":
self.use_parallel = True
elif _get_parallel_mode() == ParallelMode.DATA_PARALLEL \
and context.get_context("device_target") != "Ascend":
raise RuntimeError("Parallel optimizer only supports Ascend in data parallel mode.")
elif _get_parallel_mode() in (ParallelMode.STAND_ALONE, ParallelMode.HYBRID_PARALLEL):
raise RuntimeError("Parallel optimizer is not supported in {}.".format(_get_parallel_mode()))
else:
self.use_parallel = False
else:
self.use_parallel = False
if self.use_parallel:
if self.cls_name not in ["Lamb", "AdamWeightDecay"]:
raise RuntimeError("Parallel optimizer does not support optimizer {}".format(self.cls_name))
self.dev_num = _get_device_num()
if self.dev_num > self.param_length:
raise RuntimeError("Parallel optimizer can not be applied when the number of parameters {} is"
" less than the number of devices {}".format(self.param_length, self.dev_num))
self.param_rank = self._get_parameter_group_id()
self.optim_filter = tuple(map(lambda x: x == _get_global_rank(), self.param_rank))
self.param_names = []
for param in self.parameters:
self.param_names.append(param.name)
else:
self.optim_filter = (True,) * self.param_length
@property
def unique(self):
"""The method is to see whether to make unique. The input type is bool. The method is read-only."""
return self._unique
@unique.setter
def unique(self, value):
"""Set whether the input value is unique."""
if not isinstance(value, bool):
raise TypeError("The value type must be bool, but got value type is {}".format(type(value)))
self._unique = value
@property
def target(self):
"""The method is used to determine whether the parameter is updated on host or device. The input type is str
and can only be 'CPU', 'Ascend' or 'GPU'."""
return self._target
@target.setter
def target(self, value):
"""If the input value is set to "CPU", the parameters will be updated on the host using the Fused
optimizer operation."""
raise NotImplementedError
def decay_weight(self, gradients):
"""
Weight decay.
An approach to reduce the overfitting of a deep learning neural network model.
Args:
gradients (tuple[Tensor]): The gradients of `self.parameters`, and have the same shape as
`self.parameters`.
Returns:
tuple[Tensor], The gradients after weight decay.
"""
if self.exec_weight_decay:
params = self.parameters
if self.is_group:
gradients = self.map_(F.partial(_apply_decay), self.weight_decay_tensor_tuple, self.decay_flags,
params, gradients)
else:
gradients = self.map_(F.partial(_apply_decay, self.weight_decay_tensor), self.decay_flags,
params, gradients)
return gradients
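    # A rough sketch of what the `_apply_decay` graphs registered near the end of this file
    # compute per parameter when its decay flag is set (dense case):
    #     gradient <- gradient + weight_decay * weight
    # e.g. with weight_decay = 0.01 and a weight value of 2.0, 0.02 is added to the gradient.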
def gradients_centralization(self, gradients):
"""
Gradients centralization.
        A method for optimizing convolutional layer parameters to improve the training speed of a deep learning
        neural network model.
Args:
gradients (tuple[Tensor]): The gradients of `self.parameters`, and have the same shape as
`self.parameters`.
Returns:
tuple[Tensor], The gradients after gradients centralization.
"""
if self.is_group:
gradients = self.map_(F.partial(_apply_grad_centralization), self.grad_centralization_flags, gradients)
return gradients
def scale_grad(self, gradients):
"""
Loss scale for mixed precision.
An approach of mixed precision training to improve the speed and energy efficiency of training deep neural
network.
Args:
gradients (tuple[Tensor]): The gradients of `self.parameters`, and have the same shape as
`self.parameters`.
Returns:
tuple[Tensor], The gradients after loss scale.
"""
if self.need_scale:
gradients = self.map_(F.partial(_grad_scale, self.reciprocal_scale), gradients)
return gradients
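    # Minimal sketch of the scaling applied above: gradients produced under a loss scale S are
    # multiplied by reciprocal_scale = 1 / S, e.g. loss_scale = 1024.0 divides every gradient
    # by 1024 before the parameter update.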
def _grad_sparse_indices_deduplicate(self, gradients):
""" In the case of using big operators, deduplicate the 'indexes' in gradients."""
if self._target != 'CPU' and self._unique:
gradients = self.map_(F.partial(_indices_deduplicate), gradients)
return gradients
def _preprocess_weight_decay(self, weight_decay):
"""Check weight decay, and convert int to float."""
if isinstance(weight_decay, (float, int)):
weight_decay = float(weight_decay)
validator.check_non_negative_float(weight_decay, "weight_decay", self.cls_name)
return weight_decay
raise TypeError("Weight decay should be int or float.")
def _preprocess_grad_centralization(self, grad_centralization):
if not isinstance(grad_centralization, bool):
raise TypeError("The gradients centralization should be bool")
return grad_centralization
def _preprocess_single_lr(self, learning_rate):
"""Check lr value, and convert lr to a float, a Tensor or a LearningRateSchedule."""
if isinstance(learning_rate, (float, int)):
learning_rate = float(learning_rate)
validator.check_non_negative_float(learning_rate, "learning rate", self.cls_name)
return learning_rate
if isinstance(learning_rate, Tensor) and learning_rate.ndim == 0:
return learning_rate
self.dynamic_lr = True
if isinstance(learning_rate, Iterable):
return Tensor(np.array(list(learning_rate)).astype(np.float32))
if isinstance(learning_rate, Tensor):
if learning_rate.ndim > 1:
raise ValueError("The dim of `Tensor` type Learning rate should be a 0 or 1,"
f"but got {learning_rate.ndim}.")
if learning_rate.ndim == 1 and learning_rate.size < 2:
logger.warning("If use `Tensor` type dynamic learning rate, please make sure that the number"
"of elements in the tensor passed is greater than 1.")
return learning_rate
if isinstance(learning_rate, LearningRateSchedule):
return learning_rate
raise TypeError("Learning rate should be int, float, Tensor, Iterable or LearningRateSchedule.")
def _build_single_lr(self, learning_rate, name):
"""Build learning rate value, convert learning rate to a Parameter or a LearningRateSchedule."""
if isinstance(learning_rate, float):
learning_rate = Parameter(Tensor(learning_rate, mstype.float32), name)
if self.is_group_lr and self.dynamic_lr:
learning_rate = _ConvertToCell(learning_rate)
return learning_rate
if isinstance(learning_rate, Tensor) and learning_rate.ndim == 0:
learning_rate = Parameter(learning_rate, name)
if self.is_group_lr and self.dynamic_lr:
learning_rate = _ConvertToCell(learning_rate)
return learning_rate
if isinstance(learning_rate, Tensor) and learning_rate.ndim == 1:
return _IteratorLearningRate(learning_rate, name)
return learning_rate
def _check_group_params(self, parameters):
"""Check group params."""
parse_keys = ['params', 'lr', 'weight_decay', 'order_params', 'grad_centralization']
for group_param in parameters:
invalid_key = list(filter(lambda x: x not in parse_keys, group_param.keys()))
if invalid_key:
raise KeyError(f'The key "{invalid_key}" cannot be recognized in group params.')
if 'order_params' in group_param.keys():
if len(group_param.keys()) > 1:
raise ValueError("The order params dict in group parameters should "
"only include the 'order_params' key.")
if not isinstance(group_param['order_params'], Iterable):
raise TypeError("The value of 'order_params' should be an Iterable type.")
continue
if not group_param['params']:
raise ValueError("Optimizer got an empty group parameter list.")
for param in group_param['params']:
if not isinstance(param, Parameter):
raise TypeError("The group param should be an iterator of Parameter type.")
def _parse_group_params(self, parameters, learning_rate):
"""Parse group params."""
self._check_group_params(parameters)
if isinstance(learning_rate, Tensor) and learning_rate.ndim == 1:
tensor_lr_length = learning_rate.size
else:
tensor_lr_length = 0
for group_param in parameters:
if 'order_params' in group_param.keys():
if len(group_param.keys()) > 1:
raise ValueError("The order params dict in group parameters should "
"only include the 'order_params' key.")
if not isinstance(group_param['order_params'], Iterable):
raise TypeError("The value of 'order_params' should be an Iterable type.")
self.is_group_params_ordered = True
continue
if 'lr' in group_param.keys():
self.is_group_lr = True
group_lr = self._preprocess_single_lr(group_param['lr'])
if isinstance(group_lr, Tensor) and group_lr.ndim == 1:
group_lr_length = group_lr.size
if tensor_lr_length == 0:
tensor_lr_length = group_lr_length
elif group_lr_length != tensor_lr_length:
raise ValueError("The Tensor type dynamic learning rate in group should be the same size.")
def _init_group_params(self, parameters, learning_rate, weight_decay, grad_centralization):
"""Initialize learning rate, weight decay or grad centralization in group params."""
self._parse_group_params(parameters, learning_rate)
default_lr = self._build_single_lr(learning_rate, 'learning_rate')
params_store = []
for group_num, group_param in enumerate(parameters):
if 'order_params' in group_param.keys():
ordered_parameters = group_param['order_params']
continue
self.group_params += group_param['params']
if 'lr' in group_param.keys():
lr_param_name = 'learning_rate_group_' + str(group_num)
lr = self._preprocess_single_lr(group_param['lr'])
lr = self._build_single_lr(lr, lr_param_name)
else:
lr = default_lr
if 'weight_decay' in group_param.keys():
cur_weight_decay = self._preprocess_weight_decay(group_param['weight_decay'])
weight_decay_ = cur_weight_decay * self.loss_scale
else:
weight_decay_ = weight_decay * self.loss_scale
if 'grad_centralization' in group_param.keys():
self.grad_centralization = self._preprocess_grad_centralization(group_param['grad_centralization'])
for param in group_param['params']:
validator.check_value_type("parameter", param, [Parameter], self.cls_name)
if "conv" not in param.name and self.grad_centralization is True:
raise ValueError("Grad centralization can be perform only on the conv layer. If the parameter"
"is not a convolution layer, this parameter cannot be set to True.")
grad_centralization_ = self.grad_centralization
else:
grad_centralization_ = grad_centralization
for key in group_param.keys():
if key not in ('params', 'lr', 'weight_decay', 'grad_centralization'):
logger.warning(f"The optimizer cannot parse '{key}' when setting parameter groups.")
for param in group_param['params']:
validator.check_value_type("parameter", param, [Parameter], self.cls_name)
if param.name in params_store:
raise RuntimeError(f"The {param.name} parameter has appeared in parameter groups.")
params_store.append(param.name)
self.group_lr.append(lr)
self.group_weight_decay.append(weight_decay_)
self.group_grad_centralization.append(grad_centralization_)
if self.is_group_params_ordered:
self._order_and_adjust_group_params(ordered_parameters)
def _order_and_adjust_group_params(self, ordered_parameters):
"""
Order group parameter, learning rate, weight decay and grad centralization in group params.
"""
params_length = len(self.group_params)
if len(ordered_parameters) != len(self.group_params):
raise ValueError(f"The value of 'order_params' should be same with all group parameters.")
ordered_params = [None] * params_length
ordered_learning_rate = [None] * params_length
ordered_weight_decay = [None] * params_length
ordered_grad_centralization = [None] * params_length
params_name = [param.name for param in ordered_parameters]
for param, lr, wd, gc in zip(self.group_params, self.group_lr, self.group_weight_decay,
self.group_grad_centralization):
index = params_name.index(param.name)
ordered_params[index] = param
ordered_learning_rate[index] = lr
ordered_weight_decay[index] = wd
ordered_grad_centralization[index] = gc
self.group_params = ordered_params
self.group_lr = ordered_learning_rate
self.group_weight_decay = ordered_weight_decay
self.group_grad_centralization = ordered_grad_centralization
def get_lr(self):
"""
Get the learning rate of current step.
Returns:
float, the learning rate of current step.
"""
lr = self.learning_rate
if self.dynamic_lr:
if self.is_group_lr:
lr = ()
for learning_rate in self.learning_rate:
current_dynamic_lr = learning_rate(self.global_step)
lr += (current_dynamic_lr,)
else:
lr = self.learning_rate(self.global_step)
self.assignadd(self.global_step, self.global_step_increase_tensor)
return lr
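    # Note on get_lr above: when dynamic_lr is enabled, every call advances global_step by one,
    # so the i-th call returns the learning rate scheduled for step i.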
def get_lr_parameter(self, param):
"""
Get the learning rate of parameter.
Args:
param (Union[Parameter, list[Parameter]]): The `Parameter` or list of `Parameter`.
Returns:
Parameter, single `Parameter` or `list[Parameter]` according to the input type.
"""
def get_lr_value(learning_rate):
if isinstance(learning_rate, (_ConvertToCell, _IteratorLearningRate)):
return learning_rate.learning_rate
return learning_rate
if isinstance(param, Parameter):
param_list = [param]
elif isinstance(param, list):
param_list = param
else:
raise TypeError(f"The parameter only support 'Parameter' or 'list' type.")
lr = []
ids = [id(p) for p in self.parameters]
for p in param_list:
validator.check_value_type("parameter", p, [Parameter], self.cls_name)
if id(p) not in ids:
raise ValueError(f"The parameter {p.name} is not in optimizer.")
if self.is_group_lr:
index = ids.index(id(p))
lr.append(get_lr_value(self.learning_rate[index]))
else:
lr.append(get_lr_value(self.learning_rate))
return lr if isinstance(param, list) else lr[0]
def _get_parameter_group_id(self):
"""
Get the parameter partition group id, which is less than the number of devices.
Returns:
tuple, the group id tuple of parameters.
"""
rank_list = ()
count = 0
for _ in range(self.param_length):
rank_list = rank_list + (count,)
count = count + 1
if count == self.dev_num:
count = 0
return rank_list
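    # Round-robin assignment example (hypothetical sizes): with 5 parameters and dev_num = 2,
    # the returned tuple is (0, 1, 0, 1, 0).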
def broadcast_params(self, optim_result):
"""
Apply Broadcast operations in the sequential order of parameter groups.
Returns:
bool, the status flag.
"""
param_group = []
key_group = []
for _ in range(self.dev_num):
param_group.append(F.make_tuple())
key_group.append(F.make_tuple())
for i in range(self.param_length):
param_group[self.param_rank[i]] = param_group[self.param_rank[i]] + (self.parameters[i],)
key = P.MakeRefKey(self.param_names[i])()
key_group[self.param_rank[i]] = key_group[self.param_rank[i]] + (key,)
new_param_group = []
for root in range(self.dev_num):
ops = P.Broadcast(root)
if root > 0:
param_group[root] = F.depend(param_group[root], new_param_group[root-1])
next_params = ops(param_group[root])
new_param_group.append(next_params)
for i in range(F.tuple_len(next_params)):
F.assign(key_group[root][i], next_params[i])
return new_param_group
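    # Broadcast sketch: parameters are grouped by their assigned rank (see _get_parameter_group_id),
    # each group is broadcast from its owning rank in turn, and F.depend chains the groups so the
    # broadcasts execute sequentially.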
def construct(self, *hyper_params):
raise NotImplementedError
op_add = P.AddN()
op_gather = P.Gather()
op_mul = P.Mul()
op_gc = inner.Centralization()
_apply_decay = C.MultitypeFuncGraph("apply_decay")
_apply_grad_centralization = C.MultitypeFuncGraph("apply_grad_centralization")
@_apply_decay.register("Tensor", "Bool", "Tensor", "RowTensor")
def _tensor_apply_decay_with_sparse(weight_decay, if_apply, weight, gradient):
"""Get grad with weight_decay."""
if if_apply:
indices = gradient.indices
values = op_add((op_gather(weight, indices, 0) * F.cast(weight_decay, F.dtype(weight)), gradient.values))
shape = gradient.dense_shape
return RowTensor(indices, values, shape)
return gradient
@_apply_decay.register("Tensor", "Bool", "Tensor", "Tensor")
def _tensor_apply_decay(weight_decay, if_apply, weight, gradient):
"""Get grad with weight_decay."""
if if_apply:
return op_add((op_mul(weight, F.cast(weight_decay, F.dtype(weight))), gradient))
return gradient
@_apply_grad_centralization.register("Bool", "RowTensor")
def _tensor_apply_grad_centralization_with_sparse(if_apply, gradient):
"""Get grad with grad_centralization."""
if if_apply:
indices = gradient.indices
values = op_gc(gradient.values, -1)
shape = gradient.dense_shape
return RowTensor(indices, values, shape)
return gradient
@_apply_grad_centralization.register("Bool", "Tensor")
def _tensor_apply_grad_centralization(if_apply, gradient):
"""Get grad with grad_centralization."""
if if_apply:
return op_gc(gradient, -1)
return gradient
_grad_scale = C.MultitypeFuncGraph("grad_scale")
_indices_deduplicate = C.MultitypeFuncGraph("indices_deduplicate")
@_grad_scale.register("Number", "Tensor")
def tensor_grad_scale(scale, grad):
"""Get grad with scale."""
if scale == 1.0:
return grad
return op_mul(grad, F.cast(scale, F.dtype(grad)))
@_grad_scale.register("Tensor", "Tensor")
def tensor_grad_scale_with_tensor(scale, grad):
"""Get grad with scale."""
return op_mul(grad, F.cast(scale, F.dtype(grad)))
@_grad_scale.register("Tensor", "RowTensor")
def tensor_grad_scale_with_sparse(scale, grad):
"""Get grad with scale."""
return RowTensor(grad.indices, grad.values * F.cast(scale, F.dtype(grad.values)), grad.dense_shape)
@_indices_deduplicate.register("RowTensor")
def rowtensor_deduplicate_indices_slices(grad):
"""Unique the indices and sums the 'values' corresponding to the duplicate indices."""
indices = grad.indices
values = grad.values
unique_indices, index_position = P.Unique()(indices)
summed_values = P.UnsortedSegmentSum()(values, index_position, P.DynamicShape()(unique_indices)[0])
return RowTensor(unique_indices, summed_values, grad.dense_shape)
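# Illustrative example of the deduplication above: indices = [0, 0, 2] with values = [v0, v1, v2]
# becomes indices = [0, 2] with values = [v0 + v1, v2]; the dense shape is unchanged.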
@_indices_deduplicate.register("Tensor")
def tensor_deduplicate_indice_slices(grad):
"""Return the input gradient directly in the dense sences."""
return grad
class _ConvertToCell(LearningRateSchedule):
"""Inner api, convert learning rate of scalar to LearningRateSchedule."""
def __init__(self, learning_rate):
super(_ConvertToCell, self).__init__()
if not isinstance(learning_rate, Parameter):
raise TypeError('Learning rate must be Parameter.')
self.learning_rate = learning_rate
def construct(self, global_step):
return self.learning_rate + 1.0 - 1.0
class _IteratorLearningRate(LearningRateSchedule):
"""Inner api, convert learning rate of Tensor(list) to LearningRateSchedule."""
def __init__(self, learning_rate, name):
super(_IteratorLearningRate, self).__init__()
if isinstance(learning_rate, Tensor):
if learning_rate.ndim != 1:
raise ValueError("The dim of `Tensor` type dynamic learning rate should be a 1,"
f"but got {learning_rate.ndim}.")
else:
raise TypeError("Learning rate should be Tensor.")
self.learning_rate = Parameter(learning_rate, name)
self.gather = P.Gather()
def construct(self, global_step):
return self.gather(self.learning_rate, global_step, 0)
|
py
|
1a5b2ad78ab8df12bc091711b9373e216dd82541
|
import time
import boto3
import interfaces
def _return_default_port_on_redshift_engines():
return 5439
def _return_default_custom_master_username_on_redshift_engines():
return 'awsuser'
class Tester(interfaces.TesterInterface):
def __init__(self):
self.aws_redshift_client = boto3.client('redshift')
self.cache = {}
self.user_id = boto3.client('sts').get_caller_identity().get('UserId')
self.account_arn = boto3.client('sts').get_caller_identity().get('Arn')
self.account_id = boto3.client('sts').get_caller_identity().get('Account')
self.redshift_clusters = self._get_all_redshift_clusters()
def declare_tested_service(self) -> str:
return 'redshift'
def declare_tested_provider(self) -> str:
return 'aws'
def run_tests(self) -> list:
return self.detect_redshift_cluster_encrypted() + \
self.detect_redshift_cluster_not_publicly_accessible() + \
self.detect_redshift_cluster_not_using_default_port() + \
self.detect_redshift_cluster_not_using_custom_master_username() + \
self.detect_redshift_cluster_using_logging() + \
self.detect_redshift_cluster_allow_version_upgrade() + \
self.detect_redshift_cluster_requires_ssl() + \
self.detect_redshift_cluster_not_using_ec2_classic() + \
self.get_redshift_cluster_not_encrypted_with_kms()
def _append_redshift_test_result(self, redshift, test_name, issue_status):
return {
"user": self.user_id,
"account_arn": self.account_arn,
"account": self.account_id,
"timestamp": time.time(),
"item": redshift['ClusterIdentifier'],
"item_type": "redshift_cluster",
"test_name": test_name,
"test_result": issue_status
}
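    # Each check below emits one such record per cluster; an illustrative (made-up) result:
    #     {"item": "analytics-cluster", "item_type": "redshift_cluster",
    #      "test_name": "aws_redshift_encrypted_redshift_cluster", "test_result": "issue_found", ...}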
def _return_redshift_logging_status(self, cluster_identifier):
return self.aws_redshift_client.describe_logging_status(ClusterIdentifier=cluster_identifier)
def _return_parameter_group_names(self, parameter_groups):
result = []
for pg in parameter_groups:
result.append(pg['ParameterGroupName'])
return result
def _return_cluster_parameter_data(self, group_name):
return self.aws_redshift_client.describe_cluster_parameters(ParameterGroupName=group_name)
def _return_ssl_enabled_on_parameter_groups(self, params):
ssl_enabled = False
for pg in params:
if pg['ParameterName'].lower() == 'require_ssl' and pg['ParameterValue'].lower() == 'true':
ssl_enabled = True
break
return ssl_enabled
def detect_redshift_cluster_encrypted(self):
test_name = "aws_redshift_encrypted_redshift_cluster"
result = []
for redshift in self.redshift_clusters['Clusters']:
if not redshift['Encrypted']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_not_publicly_accessible(self):
test_name = "aws_redshift_not_publicly_accessible_redshift_cluster"
result = []
for redshift in self.redshift_clusters['Clusters']:
if redshift['PubliclyAccessible']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_not_using_default_port(self):
test_name = "aws_redshift_cluster_not_using_default_port"
result = []
for redshift in self.redshift_clusters['Clusters']:
if _return_default_port_on_redshift_engines() == redshift['Endpoint']['Port']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_not_using_custom_master_username(self):
test_name = "aws_redshift_cluster_not_using_custom_master_username"
result = []
for redshift in self.redshift_clusters['Clusters']:
if _return_default_custom_master_username_on_redshift_engines() == redshift['MasterUsername'].lower():
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_using_logging(self):
test_name = "aws_redshift_cluster_using_logging"
result = []
for redshift in self.redshift_clusters['Clusters']:
logging_metadata = self._return_redshift_logging_status(redshift['ClusterIdentifier'])
if not logging_metadata['LoggingEnabled']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_allow_version_upgrade(self):
test_name = "aws_redshift_cluster_allow_version_upgrade"
result = []
for redshift in self.redshift_clusters['Clusters']:
if not redshift['AllowVersionUpgrade']:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def detect_redshift_cluster_requires_ssl(self):
test_name = "aws_redshift_cluster_requires_ssl"
result = []
for redshift in self.redshift_clusters['Clusters']:
issue_found = True
for parameter_group_name in self._return_parameter_group_names(redshift['ClusterParameterGroups']):
param_key_value = self._return_cluster_parameter_data(parameter_group_name)
if 'Parameters' in param_key_value and len(param_key_value['Parameters']):
if self._return_ssl_enabled_on_parameter_groups(param_key_value['Parameters']):
issue_found = False
if not issue_found:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
return result
def detect_redshift_cluster_not_using_ec2_classic(self):
test_name = "aws_redshift_cluster_not_using_ec2_classic"
result = []
for redshift in self.redshift_clusters['Clusters']:
if not ('VpcId' in redshift and redshift['VpcId']):
result.append(self._append_redshift_test_result(redshift, test_name, "issue_found"))
else:
result.append(self._append_redshift_test_result(redshift, test_name, "no_issue_found"))
return result
def get_redshift_cluster_not_encrypted_with_kms(self):
test_name = "aws_redshift_cluster_not_encrypted_with_KMS_customer_master_keys"
result = []
clusters = self.redshift_clusters["Clusters"]
for cluster in clusters:
encrypted = cluster["Encrypted"]
if encrypted:
result.append(self._append_redshift_test_result(cluster, test_name, "no_issue_found"))
else:
result.append(self._append_redshift_test_result(cluster, test_name, "issue_found"))
return result
def _get_all_redshift_clusters(self):
clusters = []
paginator = self.aws_redshift_client.get_paginator('describe_clusters')
response_iterator = paginator.paginate()
for page in response_iterator:
clusters.extend(page['Clusters'])
return { "Clusters" : clusters }
|
py
|
1a5b2b233bac9ebecb0f3d55bdacf846673f482e
|
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-10-07 11:08
import functools
from typing import Union, List, Dict, Any, Set
from hanlp_trie import DictInterface, TrieDict
from hanlp.common.dataset import SamplerBuilder
from hanlp.components.taggers.transformers.transformer_tagger import TransformerTagger
from hanlp.metrics.chunking.sequence_labeling import get_entities
from hanlp.metrics.f1 import F1
from hanlp.datasets.ner.loaders.json_ner import prune_ner_tagset
from hanlp.utils.string_util import guess_delimiter
from hanlp_common.util import merge_locals_kwargs
class TransformerNamedEntityRecognizer(TransformerTagger):
def __init__(self, **kwargs) -> None:
r"""A simple tagger using transformers and a linear layer with an optional CRF
(:cite:`lafferty2001conditional`) layer for
        NER task. It can utilize a whitelist gazetteer, which is a dict mapping from entity name to entity type.
        During decoding, it performs longest-prefix-matching of these words to override the predictions from the
        underlying statistical model. It also uses a blacklist to mask out mis-predicted entities.
.. Note:: For algorithm beginners, longest-prefix-matching is the prerequisite to understand what dictionary can
do and what it can't do. The tutorial in `this book <http://nlp.hankcs.com/book.php>`_ can be very helpful.
Args:
**kwargs: Not used.
"""
super().__init__(**kwargs)
def build_metric(self, **kwargs):
return F1()
# noinspection PyMethodOverriding
def update_metrics(self, metric, logits, y, mask, batch, prediction):
for p, g in zip(prediction, self.tag_to_span(batch['tag'], batch)):
pred = set(p)
gold = set(g)
metric(pred, gold)
# noinspection PyMethodOverriding
def decode_output(self, logits, mask, batch, model=None):
output = super().decode_output(logits, mask, batch, model)
prediction = super().prediction_to_human(output, self.vocabs['tag'].idx_to_token, batch)
return self.tag_to_span(prediction, batch)
def tag_to_span(self, batch_tags, batch):
spans = []
sents = batch[self.config.token_key]
dict_whitelist = self.dict_whitelist
dict_blacklist = self.dict_blacklist
merge_types = self.config.get('merge_types', None)
for tags, tokens in zip(batch_tags, sents):
entities = get_entities(tags)
if dict_whitelist:
matches = dict_whitelist.tokenize(tokens)
if matches:
# Fix O E-LOC O like predictions
entities = get_entities(tags)
for label, start, end in entities:
if end - start == 1:
tags[start] = 'S-' + label
else:
tags[start] = 'B-' + label
for i in range(start + 1, end - 1):
tags[i] = 'I-' + label
tags[end - 1] = 'E-' + label
for start, end, label in matches:
if (not tags[start][0] in 'ME') and (not tags[end - 1][0] in 'BM'):
if end - start == 1:
tags[start] = 'S-' + label
else:
tags[start] = 'B-' + label
for i in range(start + 1, end - 1):
tags[i] = 'I-' + label
tags[end - 1] = 'E-' + label
entities = get_entities(tags)
if merge_types and len(entities) > 1:
merged_entities = []
begin = 0
for i in range(1, len(entities)):
if entities[begin][0] != entities[i][0] or entities[i - 1][2] != entities[i][1] \
or entities[i][0] not in merge_types:
merged_entities.append((entities[begin][0], entities[begin][1], entities[i - 1][2]))
begin = i
merged_entities.append((entities[begin][0], entities[begin][1], entities[-1][2]))
entities = merged_entities
if dict_blacklist:
pruned = []
delimiter_in_entity = self.config.get('delimiter_in_entity', ' ')
for label, start, end in entities:
entity = delimiter_in_entity.join(tokens[start:end])
if entity not in dict_blacklist:
pruned.append((label, start, end))
entities = pruned
spans.append(entities)
return spans
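    # Worked example (hypothetical tokens and labels): with tokens ['New', 'York', 'City'],
    # predicted tags ['O', 'O', 'O'] and a whitelist match (0, 3, 'LOC'), the tags are rewritten
    # to ['B-LOC', 'I-LOC', 'E-LOC'], so get_entities() then yields the span ('LOC', 0, 3).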
def decorate_spans(self, spans, batch):
batch_ner = []
delimiter_in_entity = self.config.get('delimiter_in_entity', ' ')
for spans_per_sent, tokens in zip(spans, batch.get(f'{self.config.token_key}_', batch[self.config.token_key])):
ner_per_sent = []
for label, start, end in spans_per_sent:
ner_per_sent.append((delimiter_in_entity.join(tokens[start:end]), label, start, end))
batch_ner.append(ner_per_sent)
return batch_ner
def generate_prediction_filename(self, tst_data, save_dir):
return super().generate_prediction_filename(tst_data.replace('.tsv', '.txt'), save_dir)
def prediction_to_human(self, pred, vocab, batch):
return self.decorate_spans(pred, batch)
def input_is_flat(self, tokens):
return tokens and isinstance(tokens, list) and isinstance(tokens[0], str)
def fit(self, trn_data, dev_data, save_dir, transformer,
delimiter_in_entity=None,
merge_types: List[str] = None,
average_subwords=False,
word_dropout: float = 0.2,
hidden_dropout=None,
layer_dropout=0,
scalar_mix=None,
grad_norm=5.0,
lr=5e-5,
transformer_lr=None,
adam_epsilon=1e-8,
weight_decay=0,
warmup_steps=0.1,
crf=False,
secondary_encoder=None,
reduction='sum',
batch_size=32,
sampler_builder: SamplerBuilder = None,
epochs=3,
tagset=None,
token_key=None,
max_seq_len=None,
sent_delimiter=None,
char_level=False,
hard_constraint=False,
transform=None,
logger=None,
seed=None,
devices: Union[float, int, List[int]] = None,
**kwargs):
"""Fit component to training set.
Args:
trn_data: Training set.
dev_data: Development set.
save_dir: The directory to save trained component.
transformer: An identifier of a pre-trained transformer.
delimiter_in_entity: The delimiter between tokens in entity, which is used to rebuild entity by joining
tokens during decoding.
merge_types: The types of consecutive entities to be merged.
average_subwords: ``True`` to average subword representations.
word_dropout: Dropout rate to randomly replace a subword with MASK.
hidden_dropout: Dropout rate applied to hidden states.
layer_dropout: Randomly zero out hidden states of a transformer layer.
scalar_mix: Layer attention.
grad_norm: Gradient norm for clipping.
lr: Learning rate for decoder.
            transformer_lr: Learning rate for encoder.
adam_epsilon: The epsilon to use in Adam.
weight_decay: The weight decay to use.
warmup_steps: The number of warmup steps.
crf: ``True`` to enable CRF (:cite:`lafferty2001conditional`).
secondary_encoder: An optional secondary encoder to provide enhanced representation by taking the hidden
states from the main encoder as input.
reduction: The loss reduction used in aggregating losses.
batch_size: The number of samples in a batch.
sampler_builder: The builder to build sampler, which will override batch_size.
epochs: The number of epochs to train.
tagset: Optional tagset to prune entities outside of this tagset from datasets.
token_key: The key to tokens in dataset.
max_seq_len: The maximum sequence length. Sequence longer than this will be handled by sliding
window.
sent_delimiter: Delimiter between sentences, like period or comma, which indicates a long sentence can
be split here.
char_level: Whether the sequence length is measured at char level, which is never the case for
lemmatization.
hard_constraint: Whether to enforce hard length constraint on sentences. If there is no ``sent_delimiter``
in a sentence, it will be split at a token anyway.
transform: An optional transform to be applied to samples. Usually a character normalization transform is
passed in.
devices: Devices this component will live on.
logger: Any :class:`logging.Logger` instance.
seed: Random seed to reproduce this training.
**kwargs: Not used.
Returns:
The best metrics on training set.
"""
return super().fit(**merge_locals_kwargs(locals(), kwargs))
def build_vocabs(self, trn, logger, **kwargs):
super().build_vocabs(trn, logger, **kwargs)
if self.config.get('delimiter_in_entity', None) is None:
# Check the first sample to guess the delimiter between tokens in a NE
tokens = trn[0][self.config.token_key]
delimiter_in_entity = guess_delimiter(tokens)
            logger.info(f'Guessed that the delimiter between tokens in a named entity could be [blue]"{delimiter_in_entity}'
                        f'"[/blue]. If this is wrong, specify `delimiter_in_entity` in `fit()`.')
self.config.delimiter_in_entity = delimiter_in_entity
def build_dataset(self, data, transform=None, **kwargs):
dataset = super().build_dataset(data, transform, **kwargs)
if isinstance(data, str):
tagset = self.config.get('tagset', None)
if tagset:
dataset.append_transform(functools.partial(prune_ner_tagset, tagset=tagset))
return dataset
@property
def dict_whitelist(self) -> DictInterface:
return self.config.get('dict_whitelist', None)
@dict_whitelist.setter
def dict_whitelist(self, dictionary: Union[DictInterface, Union[Dict[str, Any], Set[str]]]):
if dictionary is not None and not isinstance(dictionary, DictInterface):
dictionary = TrieDict(dictionary)
self.config.dict_whitelist = dictionary
@property
def dict_blacklist(self) -> DictInterface:
return self.config.get('dict_blacklist', None)
@dict_blacklist.setter
def dict_blacklist(self, dictionary: Union[DictInterface, Union[Dict[str, Any], Set[str]]]):
if dictionary is not None and not isinstance(dictionary, DictInterface):
dictionary = TrieDict(dictionary)
self.config.dict_blacklist = dictionary
|
py
|
1a5b2c5f3ee3a5dbbdaa36b2807054059ea91674
|
from http import HTTPStatus
def test_create_request(client):
url = "/create"
data = {
"events":"invitee.created",
"url":"https://blah.foo/bar"
}
    response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_subscribe_request(client):
url = "/subscribe"
data = {
"hooksId":"497220"
}
    response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_list_request(client):
url = "/subscribe/list"
response = client.get(url)
assert response.status_code == HTTPStatus.OK
def test_delete_request(client):
url = "/delete"
data = {
"hooksId":"497220"
}
    response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_eventList_request(client):
url = "/event/type"
response = client.get(url)
assert response.status_code == HTTPStatus.OK
def test_about_request(client):
url = "/about"
response = client.get(url)
assert response.status_code == HTTPStatus.OK
|
py
|
1a5b2cbe9b44ed4e1758056b8cb7e00610cf5b0a
|
import asyncio
import logging
import struct
from . import package
from .constants import MQTTv50, MQTTCommands
logger = logging.getLogger(__name__)
class BaseMQTTProtocol(asyncio.StreamReaderProtocol):
def __init__(self, buffer_size=2**16, loop=None):
if not loop:
loop = asyncio.get_event_loop()
self._connection = None
self._transport = None
self._connected = asyncio.Event(loop=loop)
reader = asyncio.StreamReader(limit=buffer_size, loop=loop)
super(BaseMQTTProtocol, self).__init__(reader, loop=loop)
def set_connection(self, conn):
self._connection = conn
def _parse_packet(self):
raise NotImplementedError
def connection_made(self, transport: asyncio.Transport):
super(BaseMQTTProtocol, self).connection_made(transport)
logger.info('[CONNECTION MADE]')
self._transport = transport
self._connected.set()
def data_received(self, data):
super(BaseMQTTProtocol, self).data_received(data)
def write_data(self, data: bytes):
if not self._transport.is_closing():
self._transport.write(data)
else:
logger.warning('[TRYING WRITE TO CLOSED SOCKET]')
def connection_lost(self, exc):
self._connected.clear()
super(BaseMQTTProtocol, self).connection_lost(exc)
if exc:
logger.warning('[EXC: CONN LOST]', exc_info=exc)
else:
logger.info('[CONN CLOSE NORMALLY]')
async def read(self, n=-1):
bs = await self._stream_reader.read(n=n)
        # We received nothing but the connection is not closed -
        # close it manually.
if not bs and not self._transport.is_closing():
self._transport.close()
# self.connection_lost(ConnectionResetError())
raise ConnectionResetError("Reset connection manually.")
return bs
class MQTTProtocol(BaseMQTTProtocol):
proto_name = b'MQTT'
proto_ver = MQTTv50
def __init__(self, *args, **kwargs):
super(MQTTProtocol, self).__init__(*args, **kwargs)
self._queue = asyncio.Queue()
self._disconnect = asyncio.Event()
self._read_loop_future = None
def connection_made(self, transport: asyncio.Transport):
super().connection_made(transport)
self._read_loop_future = asyncio.ensure_future(self._read_loop())
async def send_auth_package(self, client_id, username, password, clean_session, keepalive,
will_message=None, **kwargs):
pkg = package.LoginPackageFactor.build_package(client_id, username, password, clean_session,
keepalive, self, will_message=will_message, **kwargs)
self.write_data(pkg)
def send_subscribe_packet(self, topic, qos, **kwargs):
pkg = package.SubscribePacket.build_package(topic, qos, self, **kwargs)
self.write_data(pkg)
def send_simple_command_packet(self, cmd):
pkg = package.SimpleCommandPacket.build_package(cmd)
self.write_data(pkg)
def send_ping_request(self):
self.send_simple_command_packet(MQTTCommands.PINGREQ)
def send_publish(self, message):
mid, pkg = package.PublishPacket.build_package(message, self)
self.write_data(pkg)
return mid, pkg
def send_disconnect(self, reason_code=0, **properties):
pkg = package.DisconnectPacket.build_package(self, reason_code=reason_code, **properties)
self.write_data(pkg)
return pkg
def send_command_with_mid(self, cmd, mid, dup, reason_code=0):
pkg = package.CommandWithMidPacket.build_package(cmd, mid, dup, reason_code=reason_code,
proto_ver=self.proto_ver)
self.write_data(pkg)
async def _read_packet(self):
remaining_count = []
remaining_length = 0
remaining_mult = 1
while True:
byte, = struct.unpack("!B", await self.read(1))
remaining_count.append(byte)
if len(remaining_count) > 4:
logger.warning('[MQTT ERR PROTO] RECV MORE THAN 4 bytes for remaining length.')
return None
remaining_length += (byte & 127) * remaining_mult
remaining_mult *= 128
if byte & 128 == 0:
break
packet = b''
while remaining_length > 0:
chunk = await self.read(remaining_length)
remaining_length -= len(chunk)
packet += chunk
return packet
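    # Sketch of the MQTT variable-length "remaining length" decoding above: each byte contributes
    # its low 7 bits in little-endian base-128 order, and bit 7 marks continuation. For example,
    # the bytes 0xC1 0x02 decode to 0x41 + 0x02 * 128 = 321 remaining bytes.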
async def _read_loop(self):
await self._connected.wait()
while self._connected.is_set():
try:
byte = await self.read(1)
command, = struct.unpack("!B", byte)
packet = await self._read_packet()
self._connection.put_package((command, packet))
except ConnectionResetError as exc:
                # This connection will be closed because we received empty data,
                # so we can safely break out of the while loop.
logger.debug("[RECV EMPTY] Connection will be reset automatically.")
break
def connection_lost(self, exc):
super(MQTTProtocol, self).connection_lost(exc)
self._connection.put_package((MQTTCommands.DISCONNECT, b''))
if self._read_loop_future is not None:
self._read_loop_future.cancel()
self._read_loop_future = None
self._queue = asyncio.Queue()
|
py
|
1a5b2d9b1ba4f45de5eba8f0e277b03f91665c5c
|
try:
from kaggle.api.kaggle_api_extended import KaggleApi
except Exception as error:
try:
from kaggle.api.kaggle_api_extended import KaggleApi
except ImportError:
raise ImportError('Kaggle API not properly set up')
pass
import datetime
import glob
import os
import sys
import pandas as pd
"""
Extracts data from three possible sources:
1) Lending club API (not in production)
2) Kaggle API (is very slow)
3) As a proxy for the other two, a local directory of the data from kaggle
"""
def get_raw_data(call_type='local', source_path="./Data/source/", save_bool=False, raw_path="./Data/raw/",
username=None, key=None, api_path=None, kaggle_dataset_name='accepted'):
if call_type == 'api':
""" Production implementation should connect to Lending Club API
# https://www.lendingclub.com/developers/versioning
"""
print('Starting LC API connection')
data = pd.DataFrame()
if data.empty:
print('DataFrame is empty from LC API!')
else:
print(data.head())
if save_bool:
save_raw(data, kaggle_dataset_name, raw_path)
return data
# Kaggle data
elif call_type == 'kaggle':
print('Starting Kaggle Scraping')
try:
if (username is not None) & (key is not None):
os.environ['KAGGLE_USERNAME'] = username # assign environment username
os.environ['KAGGLE_KEY'] = key # assign environment key from kaggle.com
api = KaggleApi() # connect to api
api.authenticate() # authenticate
# get list of files that are in dataset and return the newest "accepted" dataset
file = get_kaggle_file(api_path, api, kaggle_dataset_name)
# download accepted dataset VERY SLOW
api.dataset_download_file(dataset=api_path, file_name=file, path=source_path, force=True)
# unzip and convert data to pandas
data = pd.read_csv(source_path + "/" + file, compression='gzip', error_bad_lines=False)
if data.empty:
print("DataFrame is empty!")
else:
print(data.head())
if save_bool:
                    # save the untouched raw data in a flat-file warehouse (currently a local directory, but could be S3)
save_raw(data, kaggle_dataset_name, raw_path)
return data
else:
print("No credentials provided, will try to retrieve from local source")
except Exception as exe:
print(sys.stderr, "Unable to access kaggle data")
print(sys.stderr, "Exception: %s" % str(exe))
sys.exit(1)
try:
print("Retrieving data from Local CSV")
# access source data from local directory
list_of_files = glob.glob('./Data/source/*%s*.csv' % kaggle_dataset_name)
# get newest accepted dataset
file = max(list_of_files, key=os.path.getctime)
data = pd.read_csv(file)
if data.empty:
print("DataFrame is empty, cannot find any data source")
sys.exit(1)
else:
print(data.head())
if save_bool:
save_raw(data, kaggle_dataset_name, raw_path)
return data
except Exception as exe:
print(sys.stderr, "Cannot access raw data. Please check dependencies are installed")
print(sys.stderr, "Exception: %s" % str(exe))
sys.exit(1)
def get_kaggle_file(path, a, name):
# kaggle api returns a list of data objects, each containing the metadata for every dataset on the page
dataset_info = a.dataset_list_files(path)
# get the file objects
dataset_obs = dataset_info.__getattribute__('files')
file_string = ''
max_date = datetime.datetime(1900, 1, 1)
for file in dataset_obs:
if name in file.__str__():
# find files with 'accepted' string in name and track the one that was created the most recently
if file.creationDate > max_date:
max_date = file.creationDate
assert isinstance(file.__str__(), object)
file_string = file.__str__()
return file_string
def save_raw(data, name, raw_path):
print("Raw Data Successfully Retrieved")
print("Saving Raw CSV file in Simple Storage Bucket Warehouse..........")
data.to_csv(raw_path + '{}_{}.csv'.format(name, datetime.datetime.today().strftime('%y_%m_%d')), index=False)
if __name__ == "__main__":
import doctest
doctest.testmod()
|
py
|
1a5b2de7cf7d3e97254b187b5b47528e3330a7f6
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# py_tst documentation build configuration file.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
# import py_tst
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = u'py-tst'
copyright = u"2021, [A[BAditya Bhatraju"
author = u"[A[BAditya Bhatraju"
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
# version = py_tst.__version__
# The full version, including alpha/beta/rc tags.
# release = py_tst.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_sidebars = {
"**": ["about.html", "navigation.html", "searchbox.html"]
}
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'py_tstdoc'
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(master_doc, 'py_tst.tex',
u'py-tst Documentation',
     u'Aditya Bhatraju', 'manual'),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'py_tst',
u'py-tst Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'py_tst',
u'py-tst Documentation',
author,
'py_tst',
'One line description of project.',
'Miscellaneous'),
]
|
py
|
1a5b2e789739badeeb7ac893b55b7bfd7602cc46
|
import time
from logging import LogRecord, getLogger, basicConfig
from logging.handlers import BufferingHandler
from multiprocessing.pool import ThreadPool
from ...backend_api.services import events
from ...config import config
buffer_capacity = config.get('log.task_log_buffer_capacity', 100)
class TaskHandler(BufferingHandler):
__flush_max_history_seconds = 30.
__once = False
@property
def task_id(self):
return self._task_id
@task_id.setter
def task_id(self, value):
self._task_id = value
def __init__(self, session, task_id, capacity=buffer_capacity):
super(TaskHandler, self).__init__(capacity)
self.task_id = task_id
self.session = session
self.last_timestamp = 0
self.counter = 1
self._last_event = None
self._thread_pool = ThreadPool(processes=1)
def shouldFlush(self, record):
"""
Should the handler flush its buffer?
Returns true if the buffer is up to capacity. This method can be
overridden to implement custom flushing strategies.
"""
# Notice! protect against infinite loops, i.e. flush while sending previous records
# if self.lock._is_owned():
# return False
        # If we need to add handlers to the base_logger, it will not automatically create a
        # stream handler when first used, so we must configure it manually.
if not TaskHandler.__once:
base_logger = getLogger()
if len(base_logger.handlers) == 1 and isinstance(base_logger.handlers[0], TaskHandler):
if record.name != 'console' and not record.name.startswith('trains.'):
base_logger.removeHandler(self)
basicConfig()
base_logger.addHandler(self)
TaskHandler.__once = True
else:
TaskHandler.__once = True
# if we passed the max buffer
if len(self.buffer) >= self.capacity:
return True
# if the first entry in the log was too long ago.
if len(self.buffer) and (time.time() - self.buffer[0].created) > self.__flush_max_history_seconds:
return True
return False
def _record_to_event(self, record):
# type: (LogRecord) -> events.TaskLogEvent
timestamp = int(record.created * 1000)
if timestamp == self.last_timestamp:
timestamp += self.counter
self.counter += 1
else:
self.last_timestamp = timestamp
self.counter = 1
# unite all records in a single second
if self._last_event and timestamp - self._last_event.timestamp < 1000 and \
record.levelname.lower() == str(self._last_event.level):
# ignore backspaces (they are often used)
self._last_event.msg += '\n' + record.getMessage().replace('\x08', '')
return None
self._last_event = events.TaskLogEvent(
task=self.task_id,
timestamp=timestamp,
level=record.levelname.lower(),
worker=self.session.worker,
msg=record.getMessage().replace('\x08', '') # ignore backspaces (they are often used)
)
return self._last_event
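    # Collision handling sketch: records created in the same millisecond get timestamps t, t+1,
    # t+2, ... via self.counter, and records arriving within one second at the same level are
    # folded into a single multi-line event rather than emitted separately.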
def flush(self):
if not self.buffer:
return
self.acquire()
buffer = self.buffer
try:
if not buffer:
return
self.buffer = []
record_events = [self._record_to_event(record) for record in buffer]
self._last_event = None
batch_requests = events.AddBatchRequest(requests=[events.AddRequest(e) for e in record_events if e])
except Exception:
batch_requests = None
print("Failed logging task to backend ({:d} lines)".format(len(buffer)))
finally:
self.release()
if batch_requests:
self._thread_pool.apply_async(self._send_events, args=(batch_requests, ))
def wait_for_flush(self):
self.acquire()
try:
self._thread_pool.close()
self._thread_pool.join()
except Exception:
pass
self._thread_pool = ThreadPool(processes=1)
self.release()
def _send_events(self, a_request):
try:
res = self.session.send(a_request)
if not res.ok():
print("Failed logging task to backend ({:d} lines, {})".format(len(a_request.requests), str(res.meta)))
except Exception:
print("Failed logging task to backend ({:d} lines)".format(len(a_request.requests)))
|
py
|
1a5b2ef74e048bd4b3e1db4070388e46999f0b27
|
from bigsi.bloom.bloomfilter import generate_hashes
from bigsi.bloom.bloomfilter import BloomFilter
|
py
|
1a5b2f19e692dace9ade33d845632cea0479cc88
|
"""
Various utilities for neural networks.
"""
import math
import torch as th
import torch.nn as nn
import torch.nn.functional as F
class GroupNorm32(nn.GroupNorm):
def __init__(self, num_groups, num_channels, swish, eps=1e-5):
super().__init__(num_groups=num_groups, num_channels=num_channels, eps=eps)
self.swish = swish
def forward(self, x):
y = super().forward(x.float()).to(x.dtype)
if self.swish == 1.0:
y = F.silu(y)
elif self.swish:
y = y * F.sigmoid(y * float(self.swish))
return y
def conv_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D convolution module.
"""
if dims == 1:
return nn.Conv1d(*args, **kwargs)
elif dims == 2:
return nn.Conv2d(*args, **kwargs)
elif dims == 3:
return nn.Conv3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def linear(*args, **kwargs):
"""
Create a linear module.
"""
return nn.Linear(*args, **kwargs)
def avg_pool_nd(dims, *args, **kwargs):
"""
Create a 1D, 2D, or 3D average pooling module.
"""
if dims == 1:
return nn.AvgPool1d(*args, **kwargs)
elif dims == 2:
return nn.AvgPool2d(*args, **kwargs)
elif dims == 3:
return nn.AvgPool3d(*args, **kwargs)
raise ValueError(f"unsupported dimensions: {dims}")
def zero_module(module):
"""
Zero out the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().zero_()
return module
def scale_module(module, scale):
"""
Scale the parameters of a module and return it.
"""
for p in module.parameters():
p.detach().mul_(scale)
return module
def normalization(channels, swish=0.0):
"""
Make a standard normalization layer, with an optional swish activation.
:param channels: number of input channels.
:return: an nn.Module for normalization.
"""
return GroupNorm32(num_channels=channels, num_groups=32, swish=swish)
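# For example, normalization(64) builds a GroupNorm over 64 channels split into 32 groups with no
# activation, while normalization(64, swish=1.0) additionally applies SiLU to the normalized output.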
def timestep_embedding(timesteps, dim, max_period=10000):
"""
Create sinusoidal timestep embeddings.
:param timesteps: a 1-D Tensor of N indices, one per batch element.
These may be fractional.
:param dim: the dimension of the output.
:param max_period: controls the minimum frequency of the embeddings.
:return: an [N x dim] Tensor of positional embeddings.
"""
half = dim // 2
freqs = th.exp(
-math.log(max_period) * th.arange(start=0, end=half, dtype=th.float32) / half
).to(device=timesteps.device)
args = timesteps[:, None].float() * freqs[None]
embedding = th.cat([th.cos(args), th.sin(args)], dim=-1)
if dim % 2:
embedding = th.cat([embedding, th.zeros_like(embedding[:, :1])], dim=-1)
return embedding
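# Quick shape illustration: timestep_embedding(th.arange(4), dim=8) returns a [4, 8] tensor whose
# first four columns are cosines and last four are sines of the timesteps scaled by geometrically
# spaced frequencies (an odd `dim` gets one zero column appended).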
|
py
|
1a5b3272e1ddf7d6ae36690832117a697897f847
|
import sys
from threading import RLock
from typing import Dict, List, Optional, Tuple
from ..constants import MAXIMUM_TXDATA_CACHE_SIZE_MB, MINIMUM_TXDATA_CACHE_SIZE_MB
class Node:
previous: 'Node'
next: 'Node'
key: bytes
value: bytes
def __init__(self, previous: Optional['Node']=None, next: Optional['Node']=None,
key: bytes=b'', value: bytes=b'') -> None:
self.previous = previous if previous is not None else self
        self.next = next if next is not None else self
self.key = key
self.value = value
# Derived from functools.lru_cache; LRUCache should be considered licensed under the Python license.
# This intentionally does not have a dictionary interface for now.
class LRUCache:
def __init__(self, max_count: Optional[int]=None, max_size: Optional[int]=None) -> None:
self._cache: Dict[bytes, Node] = {}
assert max_count is not None or max_size is not None, "need some limit"
if max_size is None:
max_size = MAXIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024)
assert MINIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024) <= max_size <= \
MAXIMUM_TXDATA_CACHE_SIZE_MB * (1024 * 1024), \
f"maximum size {max_size} not within min/max constraints"
self._max_size = max_size
self._max_count: int = max_count if max_count is not None else sys.maxsize
self.current_size = 0
self.hits = self.misses = 0
self._lock = RLock()
# This will be a node in a bi-directional circular linked list with itself as sole entry.
self._root = Node()
def set_maximum_size(self, maximum_size: int, resize: bool=True) -> None:
self._max_size = maximum_size
if resize:
with self._lock:
self._resize()
def get_sizes(self) -> Tuple[int, int]:
return (self.current_size, self._max_size)
def _add(self, key: bytes, value: bytes) -> Node:
most_recent_node = self._root.previous
new_node = Node(most_recent_node, self._root, key, value)
most_recent_node.next = self._root.previous = self._cache[key] = new_node
self.current_size += len(value)
return new_node
def __len__(self) -> int:
return len(self._cache)
def __contains__(self, key: bytes) -> bool:
return key in self._cache
def set(self, key: bytes, value: Optional[bytes]) -> Tuple[bool, List[Tuple[bytes, bytes]]]:
added = False
removals: List[Tuple[bytes, bytes]] = []
with self._lock:
node = self._cache.get(key, None)
if node is not None:
previous_node, next_node, old_value = node.previous, node.next, node.value
assert value != old_value, "duplicate set not supported"
previous_node.next = next_node
next_node.previous = previous_node
self.current_size -= len(old_value)
del self._cache[key]
removals.append((key, old_value))
if value is not None and len(value) <= self._max_size:
added_node = self._add(key, value)
added = True
# Discount the root node when considering count.
resize_removals = self._resize()
assert all(t[0] != added_node.key for t in resize_removals), "removed added node"
removals.extend(resize_removals)
return added, removals
def get(self, key: bytes) -> Optional[bytes]:
with self._lock:
node = self._cache.get(key)
if node is not None:
previous_node, next_node, value = node.previous, node.next, node.value
previous_node.next = next_node
next_node.previous = previous_node
most_recent_node = self._root.previous
most_recent_node.next = self._root.previous = node
node.previous = most_recent_node
node.next = self._root
self.hits += 1
return value
self.misses += 1
return None
def _resize(self) -> List[Tuple[bytes, bytes]]:
removals = []
while len(self._cache)-1 >= self._max_count or self.current_size > self._max_size:
node = self._root.next
previous_node, next_node, discard_key, discard_value = \
node.previous, node.next, node.key, node.value
previous_node.next = next_node
next_node.previous = previous_node
self.current_size -= len(discard_value)
del self._cache[discard_key]
removals.append((discard_key, discard_value))
return removals
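# A minimal usage sketch (illustrative only, not part of the original module). The
# cache evicts least-recently-used entries once the entry count or the total byte
# size of stored values exceeds the configured limits.
def _demo_lru_cache() -> "LRUCache":
    cache = LRUCache(max_count=2)   # byte-size limit defaults to the maximum allowed
    cache.set(b"a", b"1111")
    cache.set(b"b", b"2222")
    cache.get(b"a")                 # touch "a" so that "b" becomes the LRU entry
    added, removals = cache.set(b"c", b"3333")
    assert added and removals == [(b"b", b"2222")]
    return cache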
|
py
|
1a5b32e9e3d3dc83468ece16315182a3b4e15a93
|
'''
Created on Apr 8, 2016
@author: jesus calvillo
Contains methods used to load and pre-process the corpus
'''
import numpy as np
import theano
from tools.similarities import binaryEquality
from containers import Situation,TrainingElement,InputObjectAP,Corpus,CorpusAP
from crossValidation import Fold
rawPrologFilePath='dataFiles/rawPrologOutput/model.prod_train.localist.set'
wordLocalistMapPath='dataFiles/map_localist_words.txt'
dsssMatrixPath="dataFiles/model_vectors"
'''
Loads the file in wordLocalistMapPath, creating a dictionary that maps localist indices to words.
'''
def getWordLocalistMap(filename):
FILE=open(filename,'r')
indexToWord={}
for line in FILE:
segs=line.split('[')
word=segs[0][:-1]
vector=segs[1].strip()
vector=vector[:-1]
vector=vector.split(",")
vector=[float(i) for i in vector]
index=np.argmax(vector,axis=0)
indexToWord[index]=word
FILE.close()
return indexToWord
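# Note (inferred from the parsing above, not part of the original file): each line of
# the word-localist map file is assumed to look like
#     word [0.0,0.0,1.0,...,0.0]
# i.e. a word followed by its localist vector; the position of the single 1.0 entry
# becomes the key of the returned index->word dictionary.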
'''
Loads the file in dsssMatrixPath and returns a matrix concatenating all situation vectors of the atomic events
Returns also a list containing all basic events in the file
'''
def getAtomicEventDSSMap(filename):
FILE=open(filename,'r')
dsssMatrix=[]
events=[]
for line in FILE:
segments=line.split()
predicate=segments[0]
dss=segments[1:]
dss=np.asarray(dss).astype(theano.config.floatX) # @UndefinedVariable
dsssMatrix.append(dss)
events.append(predicate)
dsssMatrix=np.asarray(dsssMatrix)
return dsssMatrix,events
'''
Returns a map Word->Index, by taking a map Index->Word
'''
def getMapWordToIndex(mapIndexWord):
mapWordToIndex={}
for key,value in mapIndexWord.iteritems():
mapWordToIndex[value]=key
return mapWordToIndex
'''
Takes a list of TrainingElement instances and appends the localist representation of a period to wordsLocalist
'''
def addPeriods(trainingElementList,vocabSize):
dot=[0]*vocabSize
dot.append(1)
for item in trainingElementList:
        if not hasattr(item, 'period') or item.period == False:
for i in xrange(len(item.wordsLocalist)):
item.wordsLocalist[i]=np.append(item.wordsLocalist[i],[0])
item.wordsLocalist.append(dot)
item.period=True
item.testItem=item.testItem+ " ."
'''
Takes a list of TrainingElement instances and sets its input type
'''
def setInputType(trainingElementList,inputType):
if inputType=="dss":
for sent in trainingElementList: sent.input=sent.dss150
if inputType=="sitVector":
for sent in trainingElementList: sent.input=sent.sitVector
if inputType=="compVector":
for sent in trainingElementList: sent.input=sent.compVector
if inputType=="beliefVector":
for sent in trainingElementList: sent.input=sent.DSSValue
'''
Takes a list of TrainingElement instances and sets its output type
'''
def setOutputType(trainingElementList,outputType):
if outputType=="dss":
for sent in trainingElementList: sent.output=sent.dss150
if outputType=="sitVector":
for sent in trainingElementList: sent.output=sent.sitVector
if outputType=="compVector":
for sent in trainingElementList: sent.output=sent.compVector
if outputType=="beliefVector":
for sent in trainingElementList: sent.output=sent.DSSValue
#############################################################################################################
#### CONDITIONAL PROBABILITY SCORES AND COMPREHENSION SCORES
#############################################################################################################
def getConditionalProbs(dssVector,dsssMatrix):
dotp=np.dot(dsssMatrix,dssVector)
return dotp/np.sum(dssVector)
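'''
Illustrative sketch (not in the original file): with binary situation vectors,
getConditionalProbs estimates P(event|situation) as the fraction of observations
in which both the event and the situation hold.
'''
def demoConditionalProbs():
    dsssMatrixToy=np.array([[1.,0.,1.,0.],   # event A holds in observations 0 and 2
                            [1.,1.,0.,0.]])  # event B holds in observations 0 and 1
    dssVector=np.array([1.,0.,1.,0.])        # the situation holds in observations 0 and 2
    return getConditionalProbs(dssVector,dsssMatrixToy) # -> [1.0, 0.5]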
'''
Takes a list of TrainingElement instances (that already have beliefVector), the DSS situation space matrix, and its dimensionality (150 or 25000)
Returns the same list but adding the comprehension scores according to Frank et al. (2009)'s paper
'''
def getComprehensionVectorsBelief(trainingElementList,eventMatrix,dimensionality):
eventPriors=np.sum(eventMatrix,axis=1)/dimensionality
for item in trainingElementList:
item.compVector=item.beliefVector-eventPriors
compVector=[];
for elem,posterior,prior in zip(item.compVector,item.beliefVector,eventPriors):
compScore=0.0;
if posterior>prior:
compScore=elem/(1.0-prior)
else:
compScore=elem/prior
compVector.append(compScore)
item.compVector=np.asarray(compVector)
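'''
Illustrative helper (not in the original file): the comprehension score of a single
event, following the same formula used in getComprehensionVectorsBelief above.
posterior and prior are probabilities (floats).
'''
def getSingleComprehensionScore(posterior,prior):
    if posterior>prior:
        return (posterior-prior)/(1.0-prior)
    return (posterior-prior)/prior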
#############################################################################################################
#### LOAD AND OBTAIN CORPUS FROM RAW PROLOG-OUTPUT FILES
#############################################################################################################
'''
Takes a file containing the output of the prolog file with dss-sentences and the full 25K situation vectors
Returns a list of TrainingElement instances, where each of the latter is a sentence couple with all its information
It computes the belief vector directly and puts it into each TrainingElement
'''
def loadAndConvertRawCorpus_belief(inputPath, matrixPath):
trainingCorpus=[]
dsssMatrixBelief,_= getAtomicEventDSSMap(matrixPath)
with open(inputPath,'rb') as inputFile:
trainingItem=0
while True:
line=inputFile.readline()
if not line:
if trainingItem!=0:
trainingCorpus.append(trainingItem)
break
if((len(line.split())>0)and(line.split()[0]=='Item')):
headerSegs= line.split("\"")
testItem=headerSegs[1]
numberOfWords=int(headerSegs[2])
semantics=headerSegs[3]
if trainingItem!=0:
trainingCorpus.append(trainingItem)
dss=[]
beliefVector=[]
wordsLocalist=[]
trainingItem=TrainingElement(testItem,numberOfWords,semantics,wordsLocalist,dss)
if((len(line.split())>0)and(line.split()[0]=='Input')):
segs= line.split()
if len(beliefVector)==0:
beliefVector= segs[1:25001]
beliefVector= np.asarray(beliefVector).astype(theano.config.floatX) # @UndefinedVariable
dotp=np.dot(dsssMatrixBelief,beliefVector)
beliefVector=dotp/np.sum(beliefVector)
trainingItem.beliefVector=beliefVector
wordLocalist= segs[25002:]
wordLocalist=[int(round(float(i))) for i in wordLocalist]
wordLocalist=np.asarray(wordLocalist).astype('int8')
wordsLocalist.append(wordLocalist)
return trainingCorpus
'''
Takes a file containing the output of the prolog file with dss-sentences
Returns a list of TrainingElement instances, where each of the latter is a sentence couple with all its information
Depending on the size of the DSS representation (vectorSize) obtained from the prolog file:
150: 150-dimensional dss vectors obtained after the dimensionality reduction
25K: full situation vectors with no dimensionality reduction
10: situations that are impossible in the microworld, in this case the vectors contains only zeros
Testing with Impossible events
# corpus=loadRawCorpus_VectorSize("dataFiles/filesWith0P/model.prod_train_passive.localist_p0.set",10,False)
# for elem in corpus:
# print elem.testItem
'''
def loadRawCorpus_VectorSize(inputPath,vectorSize,active):
trainingCorpus=[]
with open(inputPath,'rb') as inputFile:
trainingItem=0
while True:
line=inputFile.readline()
if not line:
if trainingItem!=0:
trainingCorpus.append(trainingItem)
break
if((len(line.split())>0)and(line.split()[0]=='Item')):
headerSegs= line.split("\"")
schema=headerSegs[0].split()[1]
testItem=headerSegs[1]
numberOfWords=int(headerSegs[2])
semantics=headerSegs[3]
if trainingItem!=0:
trainingCorpus.append(trainingItem)
semanticVector=[]
wordsLocalist=[]
trainingItem=TrainingElement(schema,testItem,numberOfWords,semantics,wordsLocalist,semanticVector,active)
if((len(line.split())>0)and(line.split()[0]=='Input')):
segs= line.split()
if len(semanticVector)==0:
semanticVector= segs[1:vectorSize+1]
semanticVector= np.asarray(semanticVector).astype(theano.config.floatX) # @UndefinedVariable
trainingItem.DSSValue=semanticVector
wordLocalist= segs[vectorSize+2:]
wordLocalist=[int(round(float(i))) for i in wordLocalist]
wordLocalist=np.asarray(wordLocalist).astype('int8')
wordsLocalist.append(wordLocalist)
addPeriods(trainingCorpus,42) #add periods to the sentences, 42 is the vocabulary size
return trainingCorpus
'''
Takes a list of TrainingElement instances, obtained from loadRawCorpus_VectorSize, which contains either only
active or passive sentences
Puts all semantically equivalent sentences into one InputObjectAP, which is also put into a list
'''
def getCollapsedCorpus25K(normalCorpus,active):
collapsedCorpus=[]
for trainingElement in normalCorpus:
match=False
for item in collapsedCorpus:
equal= binaryEquality(trainingElement.DSSValue,item.value)
if equal:
match=True
item.sentences.append(trainingElement)
break
if not match:
newItem=InputObjectAP(trainingElement.DSSValue,active)
newItem.sentences.append(trainingElement)
collapsedCorpus.append(newItem)
return collapsedCorpus
'''
Loads the active and passive sentences from the prolog-output files forming lists of TrainingElement
To the loaded sentences, it adds the 150-dimensional dss
Puts together sentences with equivalent semantics and saves the resulting corpora to file
Each file contains either only active or only passive sentences.
'''
def getRawCorporaAP25KWith150DSS(prologActPath,prolog150ActPath,prologPasPath,prolog150PasPath,tag):
def put150DSSinto25K(corpus25Kte,corpus150te):
for elem in corpus25Kte:
for elem150 in corpus150te[:]:
if elem.testItem==elem150.testItem:
elem.dss150=elem150.DSSValue
corpus150te.remove(elem150)
return corpus25Kte
###Active Sentences
corpusAct25K=loadRawCorpus_VectorSize(prologActPath,25000,True)
corpusAct150=loadRawCorpus_VectorSize(prolog150ActPath,150,True)
corpusAct25K=put150DSSinto25K(corpusAct25K,corpusAct150)
corpusActClustered=getCollapsedCorpus25K(corpusAct25K,True) #returns a list of InputObjectAP{value, ap, sentences[]}
print len(corpusActClustered)
corpusObjectAct=Corpus(corpusActClustered)
#corpusObjectAct.saveToPickle("corpusActive25KClustered_"+tag+".pick")
###Passive Sentences
corpusPas25K=loadRawCorpus_VectorSize(prologPasPath,25000,False)
corpusPas150=loadRawCorpus_VectorSize(prolog150PasPath,150,False)
corpusPas25K=put150DSSinto25K(corpusPas25K,corpusPas150)
corpusPasClustered=getCollapsedCorpus25K(corpusPas25K,False) #returns a list of InputObjectAP{value, ap, sentences[]}
print len(corpusPasClustered)
corpusObjectPas=Corpus(corpusPasClustered)
#corpusObjectPas.saveToPickle("corpusPassive25KClustered_"+tag+".pick")
return corpusObjectAct.elements,corpusObjectPas.elements
'''
Takes two lists, one containing all InputObjectAP related to active sentences and one related to passive sentences
Puts together the InputObjectAPs into a Situation object if they have equivalent DSSs
Creates 2 lists of Situations: the first one corresponds to all the dss that contain both actives and passives
the second corresponds to dsss that only have actives
With that, creates a CorpusAP object and saves it to File
'''
def collapseActPasIOs_ToSituationsInCorpusAP(activeIOs,passiveIOs):
APSits=[]
ASits=[]
for dssp in passiveIOs:
for dssa in activeIOs[:]:
if binaryEquality(dssp.value,dssa.value):
newSit=Situation(dssp.value,dssa.sentences,dssp.sentences)
APSits.append(newSit)
activeIOs.remove(dssa)
for dssa in activeIOs[:]:
newSit=Situation(dssa.value,dssa.sentences)
ASits.append(newSit)
corpusAP = CorpusAP(APSits,ASits)
return corpusAP
'''
Takes a CorpusAP object in which the Situation's sit.value is equal to the 25K situation vector
Takes also the original 25Kx44 DSS matrix
Computes the belief vectors and uses them to replace the sit.value's and the trainElem.DSSValue
Saves it to file
'''
def convertCorpusAP25KToBelief(corpusAP25K,dsssMatrix):
def beliefVect(vector25K):
dotp=np.dot(dsssMatrix,vector25K)
return dotp/np.sum(vector25K)
for sit in corpusAP25K.actpas:
sit.value=beliefVect(sit.value)
for trainElem in sit.actives:
trainElem.DSSValue=sit.value
for trainElem in sit.passives:
trainElem.DSSValue=sit.value
for sit in corpusAP25K.act:
sit.value=beliefVect(sit.value)
for trainElem in sit.actives:
trainElem.DSSValue=sit.value
return corpusAP25K
'''
Takes a CorpusAP object with 150 dss vectors and adds the active/passive bit to the 150-dimensional dss
Since the clustering is done using the 25K vectors, it's possible that sentences related to the same situation
have different 150-dss vectors, because of that the bit-appending is done per-sentence
'''
def addAPBitCorpusAP25Kto150DSS(corpusAP25K):
for sit in corpusAP25K.actpas:
for item in sit.actives:
item.dss150=np.append(item.dss150,1.0)
for item in sit.passives:
item.dss150=np.append(item.dss150,0.0)
for sit in corpusAP25K.act:
for item in sit.actives:
item.dss150=np.append(item.dss150,1.0)
return corpusAP25K
'''
Takes a CorpusAP (with belief vectors but not necessarily) and adds the active/passive bit
'''
def addAPBitCorpusAP25K(corpusAP25K):
def addActPasBit(vector):
actdss=np.append(vector,1.0)
pasdss=np.append(vector,0.0)
return actdss,pasdss
for sit in corpusAP25K.actpas:
actdss,pasdss=addActPasBit(sit.value)
for item in sit.actives:
item.DSSValue=actdss
for item in sit.passives:
item.DSSValue=pasdss
for sit in corpusAP25K.act:
actdss,pasdss=addActPasBit(sit.value)
for item in sit.actives:
item.DSSValue=actdss
return corpusAP25K
'''
Takes a CorpusAP and sets up the item.equivalents variable of the TrainingElement instances
'''
def setupEquivalentsAP25K(corpusAP25K):
for sit in corpusAP25K.actpas:
for item in sit.actives:
item.equivalents=sit.actives
for item in sit.passives:
item.equivalents=sit.passives
for sit in corpusAP25K.act:
for item in sit.actives:
item.equivalents=sit.actives
'''
Calculates the prior probability of each sentence production rule (schema) in the trainingSet
'''
def getSchemaPriors(trainingSet):
counts={}
for x in xrange(1,52):
counts[x]=0
for elem in trainingSet:
counts[int(elem.schema)]+=1
probs={}
for x in xrange(1,52):
probs[x]=counts[x]*1.0/len(trainingSet)
return probs
'''
Takes the raw prolog-output files and creates a CorpusAP object. This object includes belief vectors and 150-dim DSS vectors
Also derivation length vectors
activeRawFile,passiveRawFile: Paths to the prolog-output files corresponding to the actives and passives respectively
dsssMatrixPath: Path to the 25K-dimensional DSS Matrix
tag: Tag to be used to name the files
act150path,pas150path Paths to the corresponding prolog-output files but with 150-dimensional DSSs
'''
def processCorpusAP25KFromRaw(activeRawFile,passiveRawFile,dsssMatrixPath,tag,act150path,pas150path):
matrix,_=getAtomicEventDSSMap(dsssMatrixPath)
activeIOs,passiveIOs=getRawCorporaAP25KWith150DSS(activeRawFile,act150path,passiveRawFile,pas150path,tag)
#2 FILES NOT! CREATED: "corpusActive25KClustered_"+tag+".pick","corpusPassive25KClustered_"+tag+".pick"
print "RAW CORPORA LOADED... PASSIVES AND ACTIVES SEPARATED"
corpusAP25K=collapseActPasIOs_ToSituationsInCorpusAP(activeIOs,passiveIOs)
#corpusAP25K.saveToPickle("corpusAP25K_"+tag+".pick")
print "ACTIVES AND PASSIVES CLUSTERED AND COLLAPSED INTO A CorpusAP"
corpusAPFinal=convertCorpusAP25KToBelief(corpusAP25K,matrix)
print "CorpusAP CONVERTED TO BELIEF VECTORS"
addAPBitCorpusAP25K(corpusAPFinal) #add voice bit to belief vectors
addAPBitCorpusAP25Kto150DSS(corpusAPFinal) #add voice bit to dss150 vectors
setupEquivalentsAP25K(corpusAPFinal)
print "Added act/pas BIT TO TRAINITEMS, AND EQUIVALENTS SET"
mapIndexWord=getWordLocalistMap(wordLocalistMapPath)
mapWordToIndex=getMapWordToIndex(mapIndexWord)
corpusAPFinal=getDerLengthsTrainingVectors(corpusAPFinal,mapWordToIndex)
print "DERIVATION LENGTHS VECTORS ADDED"
corpusAPFinal.saveToPickle("corpusAPFinal_"+tag+".pick")
return corpusAPFinal
############################################################################################################
### TEST CONDITIONS 6-9
############################################################################################################
def getCorpusCondition6_8(corpusAP,filename,phrase,phrase2=""):
'''
    Gets a corpusAP and builds three lists, returned wrapped in a Fold: one (test items) with the situations that contain a sentence with "phrase" and "phrase2".
    The second list contains a training set with all TrainingElements of the remaining situations.
    The third list contains a test set for training, where only one TrainingElement is given per situation in the training set.
'''
cond=[]
trainingSet=[]
trainTestSet=[]
def check_validity(elements_situation,phrase,phrase2):
for elem in elements_situation:
if elem.testItem.find(phrase) !=-1 and elem.testItem.find(phrase2) !=-1:
return True
return False
for situ in corpusAP.actpas:
if check_validity(situ.actives, phrase,phrase2): cond.append(situ.actives[0])
else:
trainTestSet.append(situ.actives[0])
trainingSet.extend(situ.actives)
if check_validity(situ.passives,phrase,phrase2): cond.append(situ.passives[0])
else:
trainTestSet.append(situ.passives[0])
trainingSet.extend(situ.passives)
for situ in corpusAP.act:
if check_validity(situ.actives, phrase, phrase2):cond.append(situ.actives[0])
else:
trainTestSet.append(situ.actives[0])
trainingSet.extend(situ.actives)
for tItem in cond:
print tItem.testItem
print len(trainingSet)
cond_fold= Fold ([trainingSet,trainTestSet],[cond])
cond_fold.saveToPickle(filename)
return cond_fold
def get_imaginary_TrainingElement(tItem,bevents,voice):
import copy
all_locations=["bedroom","bathroom","street","playground"]
all_people=["charlie","sophia","heidi"]
resulting_TEs=[]
if tItem.semantics.find("chess")>-1:game="chess"
elif tItem.semantics.find("soccer")>-1:game="soccer"
elif tItem.semantics.find("hide_and_seek")>-1:game="hide_and_seek"
else:game="nogame"
if tItem.semantics.find("charlie")>-1:person="charlie"
elif tItem.semantics.find("sophia")>-1:person="sophia"
elif tItem.semantics.find("heidi")>-1:person="heidi"
else: person="noperson"
if game=="chess":locations=["bathroom","playground","street"]
if game=="soccer":locations=["bathroom","playground","bedroom"]
if game=="hide_and_seek":locations=["street"]
for target_place in locations:
newTE=copy.deepcopy(tItem)
if voice=="active":newTE.testItem=person+" plays "+game+" in the "+target_place+" ."
elif voice=="passive":newTE.testItem=game+" is played by "+person+" in the "+target_place+" ."
#target_preds=["place("+persona+","+target_place+")" for persona in all_people]
target_preds=["place("+person+","+target_place+")"]
anti_target_preds=[]
for persona in all_people:
anti_target_preds_p=["place("+persona+","+location+")" for location in all_locations if location != target_place]
anti_target_preds.extend(anti_target_preds_p)
for targ_pred in target_preds:
newTE.DSSValue[bevents.index(targ_pred)]=1.0
for anti_target_p in anti_target_preds:
newTE.DSSValue[bevents.index(anti_target_p)]=0.0
resulting_TEs.append(newTE)
#===================================================================
# for ((index,event),condp) in zip(enumerate(bevents),newTE.DSSValue):
# print index,event,condp
#
return resulting_TEs
def getCorpusCondition_Imaginary(corpusAP,bevents,filename):
'''
    Gets a corpusAP and builds three lists, returned wrapped in a Fold: one with the situations that are not allowed by the microworld (e.g. charlie plays soccer in the bedroom).
    The second list contains a training set with all TrainingElements in the dataset.
    The third list contains a test set for training, where only one TrainingElement is given per situation in the training set.
'''
condA_selected=[]
condP_selected=[]
condA=[]
condP=[]
trainingSet=[]
trainTestSet=[]
phrases_active=["plays chess .","plays soccer .",'plays hide_and_seek .']
phrases_passive=["chess is played by","soccer is played by", "hide_and_seek is played by" ]
for situ in corpusAP.actpas:
testSitu=False
for item in situ.actives:
for phrase in phrases_active:
if (item.testItem.find(phrase) != -1) and (item.testItem.find("someone")==-1) and (item.testItem.find("a girl")==-1):
testSitu=True
#print item.testItem
break
if testSitu: condA_selected.append(situ.actives[0])
trainingSet.extend(situ.actives)
trainTestSet.append(situ.actives[0])
testSitu=False
for item in situ.passives:
for phrase in phrases_passive:
if (item.testItem.find(phrase) > -1) and (item.testItem.find("someone")==-1) and (item.testItem.find("a girl")==-1)and (item.testItem.find("inside")==-1) and (item.testItem.find("outside")==-1) and (item.testItem.find("in the")==-1):
testSitu=True
#print item.testItem
break
if testSitu: condP_selected.append(situ.passives[0])
trainingSet.extend(situ.passives)
trainTestSet.append(situ.passives[0])
for situ in corpusAP.act:
trainingSet.extend(situ.actives)
trainTestSet.append(situ.actives[0])
for tItem in condA_selected:
print tItem.testItem
condA.extend(get_imaginary_TrainingElement(tItem, bevents,"active"))
print
for tItem in condA:
print tItem.testItem
print
for tItem in condP_selected:
print tItem.testItem
condP.extend(get_imaginary_TrainingElement(tItem, bevents,"passive"))
print
for tItem in condP:
print tItem.testItem
cond_fold= Fold ([trainingSet,trainTestSet],[condA,condP])
cond_fold.saveToPickle(filename)
return cond_fold
#############################################################################################################
#############################################################################################################
#############################################################################################################
if __name__ == '__main__':
#==============================================================================
# ####TO GET THE CURRENT FILES WITH SCHEMA INFO and original 150dss values
# activeSentencesPrologFile="dataFiles/rawPrologOutput/model.prod_train.localist_schemas.set"
# passiveSentencesPrologFile="dataFiles/rawPrologOutput/model.prod_train_passive.localist_schemas.set"
# dssMatrixFile="dataFiles/model.observations"
# label="thesisCorpus"
# activeSents150dims="dataFiles/rawPrologOutput/filesOriginal150DSS/model.prod_train.localist.set"
# passiveSents150dims="dataFiles/rawPrologOutput/filesOriginal150DSS/model.prod_train_passive.localist.set"
#
# corpusAPFinal=processCorpusAP25KFromRaw(activeSentencesPrologFile,passiveSentencesPrologFile,dssMatrixFile,label,activeSents150dims,passiveSents150dims)
#
#
# corpusAPFinal=CorpusAP()
# corpusAPFinal.loadFromPickle("dataFiles/files-thesis/corpusAPFinal_thesis.pick")
#
# from crossValidation import getKFinalTrainTestCondFolds
# getKFinalTrainTestCondFolds(10,corpusAPFinal,"thesis",14,"trainTest_Cond-thesis")
#
# corpusLMFilename="dataFiles/files-thesis/corpusUID-thesis.pick"
# corpusLM=getCorpusUID(corpusAPFinal, corpusLMFilename)
#
#==============================================================================
corpusAPFinal=CorpusAP()
corpusAPFinal.loadFromPickle("dataFiles/corpusAPFinal.pick")
_,events=getAtomicEventDSSMap(dsssMatrixPath)
#fold_6=getCorpusCondition6_8(corpusAPFinal, "dataFiles/corpus_folds/fold_cond6_plays_chess.pick", " plays chess")
#fold_6=getCorpusCondition6_8(corpusAPFinal, "dataFiles/corpus_folds/fold_cond6_plays_soccer.pick", " plays soccer")
#fold_6=getCorpusCondition6_8(corpusAPFinal "dataFiles/corpus_folds/fold_cond6_plays_hideandseek.pick", " plays hide_and_seek",)
#fold_7=getCorpusCondition6_8(corpusAPFinal, "dataFiles/corpus_folds/fold_cond7_loses_at_chess.pick","loses","at chess")
#fold_7=getCorpusCondition6_8(corpusAPFinal, "dataFiles/corpus_folds/fold_cond7_loses_at_soccer.pick","loses", " at soccer")
#fold_7=getCorpusCondition6_8(corpusAPFinal, "dataFiles/corpus_folds/fold_cond7_loses_at_hideandseek.pick","loses"," at hide_and_seek")
#fold_8=getCorpusCondition6_8(corpusAPFinal, "dataFiles/corpus_folds/fold_cond8_chess_is_played.pick", "chess is played")
#fold_8=getCorpusCondition6_8(corpusAPFinal", "dataFiles/corpus_folds/fold_cond8_soccer_is_played.pick", "soccer is played)
#fold_8=getCorpusCondition6_8(corpusAPFinal, "dataFiles/corpus_folds/fold_cond8_hideandseek_is_played.pick", "hide_and_seek is played")
fold_cond_imaginary=getCorpusCondition_Imaginary(corpusAPFinal,events,"dataFiles/corpus_folds/fold_cond_imaginary11.pick")
for item in fold_cond_imaginary.valtestSet[0]:
print item.testItem
for ev,condp in zip(events,item.DSSValue):
print ev,condp
print
#corpusAPFinal.find_and_print_sentence(events,"hide_and_seek is played by charlie .")
|
py
|
1a5b32f210f8a53be7743f896c22b3fb74715448
|
from __future__ import annotations
import abc
from contextlib import contextmanager
from typing import Any
from typing import Generator
from eggbot.provider.db_connector import Cursor
from eggbot.provider.db_connector import DBConnection
class DBStoreIntfc(abc.ABC):
"""ABC for all database store providers"""
# Reusable code
@contextmanager
def get_cursor(self) -> Generator[Cursor, None, None]:
"""Context manager for creating a database cursor. Does not commit changes."""
cursor = self.dbconn.cursor()
try:
yield cursor
finally:
cursor.close()
# Override the following methods for each implementation
@abc.abstractmethod
def __init__(self, db_connection: DBConnection) -> None:
self.dbconn = db_connection # pragma: no cover
raise NotImplementedError()
@abc.abstractmethod
def get(self) -> list[Any]:
"""Override for database specific get method"""
raise NotImplementedError()
@abc.abstractmethod
def save(self, event: str) -> None:
"""Override for database specific save method"""
raise NotImplementedError()
@abc.abstractmethod
def delete(self, uid: str) -> None:
"""Override for database specific delete method"""
raise NotImplementedError()
@abc.abstractmethod
def _to_model(self, row: list[list[Any]]) -> list[Any]:
"""Override to model database rows to object"""
raise NotImplementedError()
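# A minimal concrete-store sketch (illustrative only): the table name, the "?"
# placeholder style, and the commit() call on DBConnection are assumptions in the
# spirit of a DB-API/sqlite backend, not part of the eggbot schema.
class ExampleEventStore(DBStoreIntfc):
    def __init__(self, db_connection: DBConnection) -> None:
        self.dbconn = db_connection

    def get(self) -> list[Any]:
        with self.get_cursor() as cursor:
            cursor.execute("SELECT * FROM example_events")  # assumed table
            return self._to_model(cursor.fetchall())

    def save(self, event: str) -> None:
        with self.get_cursor() as cursor:
            cursor.execute("INSERT INTO example_events (event) VALUES (?)", (event,))
        self.dbconn.commit()  # assumes a DB-API style connection

    def delete(self, uid: str) -> None:
        with self.get_cursor() as cursor:
            cursor.execute("DELETE FROM example_events WHERE uid = ?", (uid,))
        self.dbconn.commit()  # assumes a DB-API style connection

    def _to_model(self, row: list[list[Any]]) -> list[Any]:
        return list(row)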
|
py
|
1a5b33064cec6bc8a2d0094ba22f60511dc9c466
|
from great_expectations.core.usage_statistics.anonymizers.anonymizer import Anonymizer
from great_expectations.core.usage_statistics.anonymizers.store_backend_anonymizer import (
StoreBackendAnonymizer,
)
from great_expectations.data_context.store import (
EvaluationParameterStore,
ExpectationsStore,
HtmlSiteStore,
MetricStore,
Store,
ValidationsStore,
)
class StoreAnonymizer(Anonymizer):
def __init__(self, salt=None):
super(StoreAnonymizer, self).__init__(salt=salt)
# ordered bottom up in terms of inheritance order
self._ge_classes = [
ValidationsStore,
ExpectationsStore,
EvaluationParameterStore,
MetricStore,
Store,
HtmlSiteStore,
]
self._store_backend_anonymizer = StoreBackendAnonymizer(salt=salt)
def anonymize_store_info(self, store_name, store_obj):
anonymized_info_dict = dict()
anonymized_info_dict["anonymized_name"] = self.anonymize(store_name)
store_backend_obj = store_obj.store_backend
self.anonymize_object_info(
object_=store_obj,
anonymized_info_dict=anonymized_info_dict,
ge_classes=self._ge_classes,
)
anonymized_info_dict[
"anonymized_store_backend"
] = self._store_backend_anonymizer.anonymize_store_backend_info(
store_backend_obj=store_backend_obj
)
return anonymized_info_dict
|
py
|
1a5b33c77da88ab211e1b292ee270b1bc4f91ebf
|
import torch
import argparse
import cv2
import os
import numpy as np
from PIL import Image
from torch.autograd import Function
from torchvision import models, transforms
# from utils.dataloader import MyDataSet
from torch import nn
class FeatureExtractor():
""" Class for extracting activations and
    registering gradients from targeted intermediate layers """
def __init__(self, model, target_layers):
self.model = model
self.target_layers = target_layers
# print(target_layers)
self.gradients = []
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
outputs = []
self.gradients = []
x = x.view(x.size(0), -1)
for name, module in self.model._modules.items():
x = module(x)
# print("passed!")
if name in self.target_layers:
x.register_hook(self.save_gradient)
outputs += [x]
return outputs, x
class ModelOutputs():
""" Class for making a forward pass, and getting:
1. The network output.
        2. Activations from intermediate targeted layers.
        3. Gradients from intermediate targeted layers. """
def __init__(self, model, feature_module, target_layers):
self.model = model
self.feature_module = feature_module
self.target_layers = target_layers
self.gradients = []
def get_gradients(self):
return self.gradients
def save_gradient(self, grad):
self.gradients.append(grad)
def __call__(self, x):
target_activations = []
self.gradients = []
for name, module in self.model._modules.items():
# print(name)
# print(name)
# print(self.target_layers)
if name in self.target_layers:
x = x.view(x.size(0), -1)
x = module(x)
x.register_hook(self.save_gradient)
target_activations += [x]
# elif "aux_logits1" in name.lower():
# pass
# elif "aux_logits2" in name.lower():
# pass
else:
x = module(x)
x.register_hook(self.save_gradient)
target_activations += [x]
return target_activations, x
def preprocess_image(img):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
preprocessing = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
normalize
])
return preprocessing(img)
def show_cam_on_image(img, mask):
heatmap = cv2.applyColorMap(np.uint8(255 * mask), cv2.COLORMAP_JET)
heatmap = np.float32(heatmap) / 255
cam = heatmap + np.float32(img)
cam = cam / np.max(cam)
return np.uint8(255 * cam)
class GradCam:
def __init__(self, model, feature_module, target_layer_names, use_cuda):
self.model = model
self.feature_module = feature_module
self.model.eval()
self.cuda = use_cuda
if self.cuda:
self.model = model.cuda()
self.extractor = ModelOutputs(self.model, self.feature_module, target_layer_names)
def forward(self, input_img):
return self.model(input_img)
def __call__(self, input_img, target_category=None):
if self.cuda:
input_img = input_img.cuda()
features, output = self.extractor(input_img)
        if target_category is None:
target_category = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][target_category] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
if self.cuda:
one_hot = one_hot.cuda()
one_hot = torch.sum(one_hot * output)
self.feature_module.zero_grad()
self.model.zero_grad()
one_hot.backward(retain_graph=True)
# print(len(self.extractor.get_gradients()))
# for fea in features:
# print(fea.shape)
grads_val = self.extractor.get_gradients()[-1].cpu().data.numpy()
# print(grads_val.shape)
target = features[0]
target = target.cpu().data.numpy()[0, :]
weights = np.mean(grads_val, axis=(2, 3))[0, :]
cam = np.zeros(target.shape[1:], dtype=np.float32)
for i, w in enumerate(weights):
cam += w * target[i, :, :]
cam = np.maximum(cam, 0)
cam = cv2.resize(cam, input_img.shape[2:])
cam = cam - np.min(cam)
cam = cam / np.max(cam)
return cam
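# Illustrative sketch (not part of the original script): the core Grad-CAM weighting
# performed inside GradCam.__call__ above, isolated as a pure numpy function.
# grads and activations are the target layer's gradients/activations, shape [C, H, W].
def gradcam_from_arrays(grads, activations):
    weights = np.mean(grads, axis=(1, 2))             # one weight per channel
    cam = np.tensordot(weights, activations, axes=1)  # weighted sum over channels -> [H, W]
    cam = np.maximum(cam, 0)                          # ReLU
    cam = cam - np.min(cam)
    return cam / (np.max(cam) + 1e-12)                # normalize to [0, 1]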
class GuidedBackpropReLU(Function):
@staticmethod
def forward(self, input_img):
positive_mask = (input_img > 0).type_as(input_img)
output = torch.addcmul(torch.zeros(input_img.size()).type_as(input_img), input_img, positive_mask)
self.save_for_backward(input_img, output)
return output
@staticmethod
def backward(self, grad_output):
input_img, output = self.saved_tensors
grad_input = None
positive_mask_1 = (input_img > 0).type_as(grad_output)
positive_mask_2 = (grad_output > 0).type_as(grad_output)
grad_input = torch.addcmul(torch.zeros(input_img.size()).type_as(input_img),
torch.addcmul(torch.zeros(input_img.size()).type_as(input_img), grad_output,
positive_mask_1), positive_mask_2)
return grad_input
class GuidedBackpropReLUModel:
def __init__(self, model, use_cuda):
self.model = model
self.model.eval()
self.cuda = use_cuda
if self.cuda:
self.model = model.cuda()
def recursive_relu_apply(module_top):
for idx, module in module_top._modules.items():
recursive_relu_apply(module)
if module.__class__.__name__ == 'ReLU':
module_top._modules[idx] = GuidedBackpropReLU.apply
# replace ReLU with GuidedBackpropReLU
recursive_relu_apply(self.model)
def forward(self, input_img):
return self.model(input_img)
def __call__(self, input_img, target_category=None):
if self.cuda:
input_img = input_img.cuda()
input_img = input_img.requires_grad_(True)
output = self.forward(input_img)
        if target_category is None:
target_category = np.argmax(output.cpu().data.numpy())
one_hot = np.zeros((1, output.size()[-1]), dtype=np.float32)
one_hot[0][target_category] = 1
one_hot = torch.from_numpy(one_hot).requires_grad_(True)
if self.cuda:
one_hot = one_hot.cuda()
one_hot = torch.sum(one_hot * output)
one_hot.backward(retain_graph=True)
output = input_img.grad.cpu().data.numpy()
output = output[0, :, :, :]
return output
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--use-cuda', action='store_true', default=True,
help='Use NVIDIA GPU acceleration')
parser.add_argument('--weight-path', type=str, default='./model/InceptionV2_1209/net.pkl',
help='pretrained weight path')
parser.add_argument('--image-path', type=str, default='./data/para_test',
help='Input image path')
parser.add_argument('--output-path', type=str, default='./results/0117test/Ip2',
                        help='Output image path')
args = parser.parse_args()
args.use_cuda = args.use_cuda and torch.cuda.is_available()
if args.use_cuda:
print("Using GPU for acceleration")
else:
print("Using CPU for computation")
return args
def deprocess_image(img):
""" see https://github.com/jacobgil/keras-grad-cam/blob/master/grad-cam.py#L65 """
img = img - np.mean(img)
img = img / (np.std(img) + 1e-5)
img = img * 0.1
img = img + 0.5
img = np.clip(img, 0, 1)
return np.uint8(img * 255)
def get_last_conv_name(net):
"""
    Get the name of the last convolutional layer of the network.
:param net:
:return:
"""
layer_name = None
for name, m in net.named_modules():
if isinstance(m, nn.Conv2d):
layer_name = name
return layer_name
if __name__ == '__main__':
""" python grad_cam.py <path_to_image>
1. Loads an image with opencv.
2. Preprocesses it for VGG19 and converts to a pytorch variable.
3. Makes a forward pass to find the category index with the highest score,
and computes intermediate activations.
Makes the visualization. """
args = get_args()
model = torch.load(args.weight_path)
layer4 = None
name4 = None
for name, module in model._modules.items():
layer4 = module
# break
for name, module in model._modules.items():
name4 = name
# print(name)
# input()
grad_cam = GradCam(model=model, feature_module=layer4,
target_layer_names=[name4], use_cuda=args.use_cuda)
patht = args.image_path
gb_model = GuidedBackpropReLUModel(model=model, use_cuda=args.use_cuda)
    if not os.path.exists(args.output_path):
        os.makedirs(args.output_path)
for dir in os.listdir(patht):
folderpath = patht + '/' + dir
outfolderpath = args.output_path + '/' + dir
if not os.path.exists(outfolderpath):
os.mkdir(outfolderpath)
# print(outfolderpath)
# input()
count = 0
oplen = min(len(os.listdir(folderpath)), 20)
for img_name in os.listdir(folderpath):
count += 1
if count > oplen:
break
print("{}/{}".format(count, oplen))
image_path = folderpath + '/' + img_name
iop = outfolderpath + '/' + img_name.split('.')[0]
# print(image_path)
# print(iop)
img = Image.open(image_path)
img = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224)
])(img)
# print(type(img))
# input()
img = np.float32(img) / 255
# Opencv loads as BGR:
img = img[:, :, ::-1]
cv2.imwrite(iop + "_resize.jpg", np.uint8(255 * img))
# input()
input_img = preprocess_image(Image.open(image_path))
# print(input_img)
# print(type(input_img))
# print(input_img.shape)
input_img = input_img.unsqueeze(0)
# If None, returns the map for the highest scoring category.
# Otherwise, targets the requested category.
target_category = None
grayscale_cam = grad_cam(input_img, target_category)
grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0]))
cam = show_cam_on_image(img, grayscale_cam)
gb = gb_model(input_img, target_category=target_category)
gb = gb.transpose((1, 2, 0))
cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
cam_gb = deprocess_image(cam_mask * gb)
gb = deprocess_image(gb)
cv2.imwrite(iop + "_cam.jpg", cam)
cv2.imwrite(iop + '_gb.jpg', gb)
cv2.imwrite(iop + '_cam_gb.jpg', cam_gb)
# for dir in os.listdir(args.image_path):
# print(dir)
# input()
# img = Image.open(args.image_path)
# img = transforms.Compose([
# transforms.Resize(256),
# transforms.CenterCrop(224)
# ])(img)
# img = np.float32(img) / 255
# # Opencv loads as BGR:
# img = img[:, :, ::-1]
# input_img = preprocess_image(img)
# input_img = input_img.unsqueeze(0)
#
# # If None, returns the map for the highest scoring category.
# # Otherwise, targets the requested category.
# target_category = None
# grayscale_cam = grad_cam(input_img, target_category)
#
# grayscale_cam = cv2.resize(grayscale_cam, (img.shape[1], img.shape[0]))
# cam = show_cam_on_image(img, grayscale_cam)
#
# gb_model = GuidedBackpropReLUModel(model=model, use_cuda=args.use_cuda)
# gb = gb_model(input_img, target_category=target_category)
# gb = gb.transpose((1, 2, 0))
#
# cam_mask = cv2.merge([grayscale_cam, grayscale_cam, grayscale_cam])
# cam_gb = deprocess_image(cam_mask * gb)
# gb = deprocess_image(gb)
#
# cv2.imwrite(args.output_path + "_cam.jpg", cam)
# cv2.imwrite(args.output_path + '_gb.jpg', gb)
# cv2.imwrite(args.output_path + '_cam_gb.jpg', cam_gb)
|
py
|
1a5b34a287c50f44462a4534a31cd991e468f6ed
|
from django.conf.urls import include, url
from core.tests.api import Api, NoteResource, UserResource
from core.tests.resources import SubjectResource
api = Api()
api.register(NoteResource())
api.register(UserResource())
api.register(SubjectResource())
urlpatterns = [
url(r'^api/', include(api.urls)),
]
|
py
|
1a5b34f13c28db5dc88292193cd40c1a9eed69d3
|
from abc import (
ABC,
abstractmethod
)
from argparse import (
ArgumentParser,
Namespace,
_SubParsersAction,
)
from enum import (
auto,
Enum,
)
import logging
from multiprocessing import (
Process
)
from typing import (
Any,
Dict,
NamedTuple,
)
from lahja.base import EndpointAPI
from trinity.config import (
TrinityConfig
)
from trinity.extensibility.events import (
ComponentStartedEvent,
)
from trinity.extensibility.exceptions import (
InvalidComponentStatus,
)
from trinity._utils.mp import (
ctx,
)
from trinity._utils.logging import (
setup_log_levels,
setup_queue_logging,
)
from trinity._utils.os import (
friendly_filename_or_url,
)
from trinity._utils.profiling import (
profiler,
)
class ComponentStatus(Enum):
NOT_READY = auto()
READY = auto()
STARTED = auto()
STOPPED = auto()
INVALID_START_STATUS = (ComponentStatus.NOT_READY, ComponentStatus.STARTED,)
class TrinityBootInfo(NamedTuple):
args: Namespace
trinity_config: TrinityConfig
boot_kwargs: Dict[str, Any] = None
class BaseComponent(ABC):
_status: ComponentStatus = ComponentStatus.NOT_READY
def __init__(self, boot_info: TrinityBootInfo) -> None:
self.boot_info = boot_info
@property
@abstractmethod
def event_bus(self) -> EndpointAPI:
...
@property
@abstractmethod
def name(self) -> str:
"""
Describe the name of the component.
"""
...
@property
def normalized_name(self) -> str:
"""
The normalized (computer readable) name of the component
"""
return friendly_filename_or_url(self.name)
@classmethod
def get_logger(cls) -> logging.Logger:
return logging.getLogger(f'trinity.extensibility.component(#{cls.__name__})')
@property
def logger(self) -> logging.Logger:
return self.get_logger()
@property
def running(self) -> bool:
"""
Return ``True`` if the ``status`` is ``ComponentStatus.STARTED``,
otherwise return ``False``.
"""
return self._status is ComponentStatus.STARTED
@property
def status(self) -> ComponentStatus:
"""
Return the current :class:`~trinity.extensibility.component.ComponentStatus`
of the component.
"""
return self._status
def ready(self, manager_eventbus: EndpointAPI) -> None:
"""
Set the ``status`` to ``ComponentStatus.READY`` and delegate to
:meth:`~trinity.extensibility.component.BaseComponent.on_ready`
"""
self._status = ComponentStatus.READY
self.on_ready(manager_eventbus)
def on_ready(self, manager_eventbus: EndpointAPI) -> None:
"""
Notify the component that it is ready to bootstrap itself.
The ``manager_eventbus`` refers to the instance of the
:class:`~lahja.endpoint.Endpoint` that the
:class:`~trinity.extensibility.component_manager.ComponentManager` uses which may or may not
be the same :class:`~lahja.endpoint.Endpoint` as the component uses depending on the type
of the component. The component should use this :class:`~lahja.endpoint.Endpoint` instance
to listen for events *before* the component has started.
"""
pass
@classmethod
def configure_parser(cls, arg_parser: ArgumentParser, subparser: _SubParsersAction) -> None:
"""
Give the component a chance to amend the Trinity CLI argument parser. This hook is called
before :meth:`~trinity.extensibility.component.BaseComponent.on_ready`
"""
pass
def start(self) -> None:
"""
Delegate to :meth:`~trinity.extensibility.component.BaseComponent.do_start` and set
``running`` to ``True``. Broadcast a
:class:`~trinity.extensibility.events.ComponentStartedEvent` on the event bus and hence
allow other components to act accordingly.
"""
if self._status in INVALID_START_STATUS:
raise InvalidComponentStatus(
f"Can not start component when the component status is {self.status}"
)
self._status = ComponentStatus.STARTED
self.do_start()
self.event_bus.broadcast_nowait(
ComponentStartedEvent(type(self))
)
self.logger.info("Component started: %s", self.name)
def do_start(self) -> None:
"""
Perform the actual component start routine. In the case of a `BaseIsolatedComponent` this
method will be called in a separate process.
This method should usually be overwritten by subclasses with the exception of components
that set ``func`` on the ``ArgumentParser`` to redefine the entire host program.
"""
pass
class BaseMainProcessComponent(BaseComponent):
"""
    A :class:`~trinity.extensibility.component.BaseMainProcessComponent` takes over the whole main
    process early, before any of the subsystems have started. In that sense it redefines the whole meaning
of the ``trinity`` command.
"""
@property
def event_bus(self) -> EndpointAPI:
raise NotImplementedError('BaseMainProcessComponents do not have event busses')
class BaseIsolatedComponent(BaseComponent):
"""
A :class:`~trinity.extensibility.component.BaseIsolatedComponent` runs in an isolated process
and hence provides security and flexibility by not making assumptions about its internal
operations.
Such components are free to use non-blocking asyncio as well as synchronous calls. When an
    isolated component is stopped it first receives a SIGINT, followed soon after by a SIGTERM.
It is up to the component to handle these signals accordingly.
"""
_process: Process = None
_event_bus: EndpointAPI = None
@property
def process(self) -> Process:
"""
Return the ``Process`` created by the isolated component.
"""
return self._process
def start(self) -> None:
"""
Prepare the component to get started and eventually call ``do_start`` in a separate process.
"""
self._status = ComponentStatus.STARTED
self._process = ctx.Process(
target=self._prepare_spawn,
)
self._process.start()
self.logger.info("Component started: %s (pid=%d)", self.name, self._process.pid)
def _prepare_spawn(self) -> None:
if self.boot_info.boot_kwargs.pop('profile', False):
with profiler(f'profile_{self.normalized_name}'):
self._spawn_start()
else:
self._spawn_start()
@abstractmethod
def _spawn_start(self) -> None:
...
def stop(self) -> None:
"""
Set the ``status`` to `STOPPED`` but rely on the
:class:`~trinity.extensibility.component_manager.ComponentManager` to tear down the process.
This allows isolated components to be taken down concurrently without depending on a running
event loop.
"""
self._status = ComponentStatus.STOPPED
def _setup_logging(self) -> None:
log_queue = self.boot_info.boot_kwargs['log_queue']
level = self.boot_info.boot_kwargs.get('log_level', logging.INFO)
setup_queue_logging(log_queue, level)
if self.boot_info.args.log_levels:
setup_log_levels(self.boot_info.args.log_levels)
|
py
|
1a5b362b161902c95edfd707f663f1c90406f155
|
from brownie import interface
class CompoundSystem:
def __init__(self, registry):
self.registry = registry
self.comptroller = interface.IComptroller(registry.compound.comptroller)
def ctoken(self, name: str):
if name not in self.registry.compound.cTokens:
raise Exception(f"No cToken found for key {name}")
return interface.ICToken(self.registry.compound.cTokens[name])
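# Usage sketch (illustrative; the registry object and the "cDAI" key are assumptions,
# not guaranteed by this module):
#
#   compound = CompoundSystem(registry)
#   cdai = compound.ctoken("cDAI")                     # interface.ICToken wrapper
#   markets = compound.comptroller.getAllMarkets()     # standard Comptroller call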
|
py
|
1a5b36b07e93ca7daf1f01f13f6d7a37d482a5e7
|
import collections
from datetime import timedelta
import functools
import gc
import json
import operator
import pickle
import re
from textwrap import dedent
from typing import (
Callable,
Dict,
FrozenSet,
Hashable,
List,
Optional,
Sequence,
Set,
Union,
)
import warnings
import weakref
import numpy as np
from pandas._config import config
from pandas._libs import Timestamp, iNaT, properties
from pandas.compat import set_function_name
from pandas.compat._optional import import_optional_dependency
from pandas.compat.numpy import function as nv
from pandas.errors import AbstractMethodError
from pandas.util._decorators import Appender, Substitution, rewrite_axis_style_signature
from pandas.util._validators import validate_bool_kwarg, validate_fillna_kwargs
from pandas.core.dtypes.cast import maybe_promote, maybe_upcast_putmask
from pandas.core.dtypes.common import (
ensure_int64,
ensure_object,
ensure_str,
is_bool,
is_bool_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_dict_like,
is_extension_array_dtype,
is_integer,
is_list_like,
is_number,
is_numeric_dtype,
is_object_dtype,
is_period_arraylike,
is_re_compilable,
is_scalar,
is_timedelta64_dtype,
pandas_dtype,
)
from pandas.core.dtypes.generic import ABCDataFrame, ABCSeries
from pandas.core.dtypes.inference import is_hashable
from pandas.core.dtypes.missing import isna, notna
import pandas as pd
from pandas._typing import Dtype, FilePathOrBuffer
from pandas.core import missing, nanops
import pandas.core.algorithms as algos
from pandas.core.base import PandasObject, SelectionMixin
import pandas.core.common as com
from pandas.core.index import (
Index,
InvalidIndexError,
MultiIndex,
RangeIndex,
ensure_index,
)
from pandas.core.indexes.datetimes import DatetimeIndex
from pandas.core.indexes.period import Period, PeriodIndex
import pandas.core.indexing as indexing
from pandas.core.internals import BlockManager
from pandas.core.ops import _align_method_FRAME
from pandas.io.formats import format as fmt
from pandas.io.formats.format import DataFrameFormatter, format_percentiles
from pandas.io.formats.printing import pprint_thing
from pandas.tseries.frequencies import to_offset
# goal is to be able to define the docs close to function, while still being
# able to share
_shared_docs = dict() # type: Dict[str, str]
_shared_doc_kwargs = dict(
axes="keywords for axes",
klass="Series/DataFrame",
axes_single_arg="int or labels for object",
args_transpose="axes to permute (int or label for object)",
optional_by="""
by : str or list of str
Name or list of names to sort by""",
)
# sentinel value to use as kwarg in place of None when None has special meaning
# and needs to be distinguished from a user explicitly passing None.
sentinel = object()
def _single_replace(self, to_replace, method, inplace, limit):
"""
Replaces values in a Series using the fill method specified when no
replacement value is given in the replace method
"""
if self.ndim != 1:
raise TypeError(
"cannot replace {0} with method {1} on a {2}".format(
to_replace, method, type(self).__name__
)
)
orig_dtype = self.dtype
result = self if inplace else self.copy()
fill_f = missing.get_fill_func(method)
mask = missing.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
if values.dtype == orig_dtype and inplace:
return
result = pd.Series(values, index=self.index, dtype=self.dtype).__finalize__(self)
if inplace:
self._update_inplace(result._data)
return
return result
bool_t = bool # Need alias because NDFrame has def bool:
class NDFrame(PandasObject, SelectionMixin):
"""
    N-dimensional analogue of DataFrame. Store multi-dimensional data in a
size-mutable, labeled data structure
Parameters
----------
data : BlockManager
axes : list
copy : bool, default False
"""
_internal_names = [
"_data",
"_cacher",
"_item_cache",
"_cache",
"_is_copy",
"_subtyp",
"_name",
"_index",
"_default_kind",
"_default_fill_value",
"_metadata",
"__array_struct__",
"__array_interface__",
] # type: List[str]
_internal_names_set = set(_internal_names) # type: Set[str]
_accessors = set() # type: Set[str]
_deprecations = frozenset(
["as_blocks", "blocks", "is_copy"]
) # type: FrozenSet[str]
_metadata = [] # type: List[str]
_is_copy = None
_data = None # type: BlockManager
# ----------------------------------------------------------------------
# Constructors
def __init__(
self,
data: BlockManager,
axes: Optional[List[Index]] = None,
copy: bool = False,
dtype: Optional[Dtype] = None,
fastpath: bool = False,
):
if not fastpath:
if dtype is not None:
data = data.astype(dtype)
elif copy:
data = data.copy()
if axes is not None:
for i, ax in enumerate(axes):
data = data.reindex_axis(ax, axis=i)
object.__setattr__(self, "_is_copy", None)
object.__setattr__(self, "_data", data)
object.__setattr__(self, "_item_cache", {})
def _init_mgr(self, mgr, axes=None, dtype=None, copy=False):
""" passed a manager and a axes dict """
for a, axe in axes.items():
if axe is not None:
mgr = mgr.reindex_axis(
axe, axis=self._get_block_manager_axis(a), copy=False
)
# make a copy if explicitly requested
if copy:
mgr = mgr.copy()
if dtype is not None:
# avoid further copies if we can
if len(mgr.blocks) > 1 or mgr.blocks[0].values.dtype != dtype:
mgr = mgr.astype(dtype=dtype)
return mgr
# ----------------------------------------------------------------------
@property
def is_copy(self):
"""
Return the copy.
"""
warnings.warn(
"Attribute 'is_copy' is deprecated and will be removed "
"in a future version.",
FutureWarning,
stacklevel=2,
)
return self._is_copy
@is_copy.setter
def is_copy(self, msg):
warnings.warn(
"Attribute 'is_copy' is deprecated and will be removed "
"in a future version.",
FutureWarning,
stacklevel=2,
)
self._is_copy = msg
def _validate_dtype(self, dtype):
""" validate the passed dtype """
if dtype is not None:
dtype = pandas_dtype(dtype)
# a compound dtype
if dtype.kind == "V":
raise NotImplementedError(
"compound dtypes are not implemented"
" in the {0} constructor".format(self.__class__.__name__)
)
return dtype
# ----------------------------------------------------------------------
# Construction
@property
def _constructor(self):
"""Used when a manipulation result has the same dimensions as the
original.
"""
raise AbstractMethodError(self)
@property
def _constructor_sliced(self):
"""Used when a manipulation result has one lower dimension(s) as the
original, such as DataFrame single columns slicing.
"""
raise AbstractMethodError(self)
@property
def _constructor_expanddim(self):
"""Used when a manipulation result has one higher dimension as the
original, such as Series.to_frame()
"""
raise NotImplementedError
# ----------------------------------------------------------------------
# Axis
@classmethod
def _setup_axes(
cls,
axes,
info_axis=None,
stat_axis=None,
aliases=None,
axes_are_reversed=False,
build_axes=True,
ns=None,
docs=None,
):
"""
Provide axes setup for the major PandasObjects.
Parameters
----------
axes : the names of the axes in order (lowest to highest)
        info_axis : the axis of the selector dimension (int)
        stat_axis : the axis number used for the default stats (int)
aliases : other names for a single axis (dict)
axes_are_reversed : bool
Whether to treat passed axes as reversed (DataFrame).
build_axes : setup the axis properties (default True)
"""
cls._AXIS_ORDERS = axes
cls._AXIS_NUMBERS = {a: i for i, a in enumerate(axes)}
cls._AXIS_LEN = len(axes)
cls._AXIS_ALIASES = aliases or dict()
cls._AXIS_IALIASES = {v: k for k, v in cls._AXIS_ALIASES.items()}
cls._AXIS_NAMES = dict(enumerate(axes))
cls._AXIS_REVERSED = axes_are_reversed
# typ
setattr(cls, "_typ", cls.__name__.lower())
# indexing support
cls._ix = None
if info_axis is not None:
cls._info_axis_number = info_axis
cls._info_axis_name = axes[info_axis]
if stat_axis is not None:
cls._stat_axis_number = stat_axis
cls._stat_axis_name = axes[stat_axis]
# setup the actual axis
if build_axes:
def set_axis(a, i):
setattr(cls, a, properties.AxisProperty(i, docs.get(a, a)))
cls._internal_names_set.add(a)
if axes_are_reversed:
m = cls._AXIS_LEN - 1
for i, a in cls._AXIS_NAMES.items():
set_axis(a, m - i)
else:
for i, a in cls._AXIS_NAMES.items():
set_axis(a, i)
assert not isinstance(ns, dict)
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d
@staticmethod
def _construct_axes_dict_from(self, axes, **kwargs):
"""Return an axes dictionary for the passed axes."""
d = {a: ax for a, ax in zip(self._AXIS_ORDERS, axes)}
d.update(kwargs)
return d
def _construct_axes_from_arguments(
self, args, kwargs, require_all=False, sentinel=None
):
"""Construct and returns axes if supplied in args/kwargs.
If require_all, raise if all axis arguments are not supplied
return a tuple of (axes, kwargs).
sentinel specifies the default parameter when an axis is not
supplied; useful to distinguish when a user explicitly passes None
in scenarios where None has special meaning.
"""
# construct the args
args = list(args)
for a in self._AXIS_ORDERS:
# if we have an alias for this axis
alias = self._AXIS_IALIASES.get(a)
if alias is not None:
if a in kwargs:
if alias in kwargs:
raise TypeError(
"arguments are mutually exclusive "
"for [%s,%s]" % (a, alias)
)
continue
if alias in kwargs:
kwargs[a] = kwargs.pop(alias)
continue
# look for a argument by position
if a not in kwargs:
try:
kwargs[a] = args.pop(0)
except IndexError:
if require_all:
raise TypeError("not enough/duplicate arguments specified!")
axes = {a: kwargs.pop(a, sentinel) for a in self._AXIS_ORDERS}
return axes, kwargs
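# Illustrative example of the resolution above (assuming a DataFrame ``df``):
#   df._construct_axes_from_arguments(([0, 1, 2],), {})
# returns ({'index': [0, 1, 2], 'columns': None}, {}), because positional
# arguments are consumed in _AXIS_ORDERS order and unsupplied axes fall back
# to ``sentinel`` (None by default).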
@classmethod
def _from_axes(cls, data, axes, **kwargs):
# for construction from BlockManager
if isinstance(data, BlockManager):
return cls(data, **kwargs)
else:
if cls._AXIS_REVERSED:
axes = axes[::-1]
d = cls._construct_axes_dict_from(cls, axes, copy=False)
d.update(kwargs)
return cls(data, **d)
@classmethod
def _get_axis_number(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if is_integer(axis):
if axis in cls._AXIS_NAMES:
return axis
else:
try:
return cls._AXIS_NUMBERS[axis]
except KeyError:
pass
raise ValueError("No axis named {0} for object type {1}".format(axis, cls))
@classmethod
def _get_axis_name(cls, axis):
axis = cls._AXIS_ALIASES.get(axis, axis)
if isinstance(axis, str):
if axis in cls._AXIS_NUMBERS:
return axis
else:
try:
return cls._AXIS_NAMES[axis]
except KeyError:
pass
raise ValueError("No axis named {0} for object type {1}".format(axis, cls))
def _get_axis(self, axis):
name = self._get_axis_name(axis)
return getattr(self, name)
@classmethod
def _get_block_manager_axis(cls, axis):
"""Map the axis to the block_manager axis."""
axis = cls._get_axis_number(axis)
if cls._AXIS_REVERSED:
m = cls._AXIS_LEN - 1
return m - axis
return axis
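# Illustrative: for a DataFrame (_AXIS_REVERSED is True, _AXIS_LEN == 2),
# axis 0 ("index") maps to block manager axis 1 and axis 1 ("columns") maps
# to block manager axis 0, since the BlockManager keeps the items (columns)
# on its first axis.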
def _get_axis_resolvers(self, axis):
# index or columns
axis_index = getattr(self, axis)
d = dict()
prefix = axis[0]
for i, name in enumerate(axis_index.names):
if name is not None:
key = level = name
else:
# prefix with 'i' or 'c' depending on the input axis
# e.g., you must do ilevel_0 for the 0th level of an unnamed
# multiindex
key = "{prefix}level_{i}".format(prefix=prefix, i=i)
level = i
level_values = axis_index.get_level_values(level)
s = level_values.to_series()
s.index = axis_index
d[key] = s
# put the index/columns itself in the dict
if isinstance(axis_index, MultiIndex):
dindex = axis_index
else:
dindex = axis_index.to_series()
d[axis] = dindex
return d
def _get_index_resolvers(self):
d = {}
for axis_name in self._AXIS_ORDERS:
d.update(self._get_axis_resolvers(axis_name))
return d
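# Illustrative sketch of the resolver keys built above (consumed by
# DataFrame.query/eval): for a DataFrame with unnamed index and columns the
# dict is expected to contain 'ilevel_0', 'index', 'clevel_0' and 'columns';
# named levels are keyed by their names instead.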
def _get_space_character_free_column_resolvers(self):
"""Return the space character free column resolvers of a dataframe.
Column names with spaces are 'cleaned up' so that they can be referred
to by backtick quoting.
Used in :meth:`DataFrame.eval`.
"""
from pandas.core.computation.common import _remove_spaces_column_name
return {_remove_spaces_column_name(k): v for k, v in self.items()}
@property
def _info_axis(self):
return getattr(self, self._info_axis_name)
@property
def _stat_axis(self):
return getattr(self, self._stat_axis_name)
@property
def shape(self):
"""
Return a tuple of axis dimensions
"""
return tuple(len(self._get_axis(a)) for a in self._AXIS_ORDERS)
@property
def axes(self):
"""
Return index label(s) of the internal NDFrame
"""
# we do it this way because if we have reversed axes, then
# the block manager shows them reversed
return [self._get_axis(a) for a in self._AXIS_ORDERS]
@property
def ndim(self):
"""
Return an int representing the number of axes / array dimensions.
Return 1 if Series. Otherwise return 2 if DataFrame.
See Also
--------
ndarray.ndim : Number of array dimensions.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.ndim
1
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.ndim
2
"""
return self._data.ndim
@property
def size(self):
"""
Return an int representing the number of elements in this object.
Return the number of rows if Series. Otherwise return the number of
rows times number of columns if DataFrame.
See Also
--------
ndarray.size : Number of elements in the array.
Examples
--------
>>> s = pd.Series({'a': 1, 'b': 2, 'c': 3})
>>> s.size
3
>>> df = pd.DataFrame({'col1': [1, 2], 'col2': [3, 4]})
>>> df.size
4
"""
return np.prod(self.shape)
@property
def _selected_obj(self):
""" internal compat with SelectionMixin """
return self
@property
def _obj_with_exclusions(self):
""" internal compat with SelectionMixin """
return self
def set_axis(self, labels, axis=0, inplace=False):
"""
Assign desired index to given axis.
Indexes for column or row labels can be changed by assigning
a list-like or Index.
.. versionchanged:: 0.21.0
The signature is now `labels` and `axis`, consistent with
the rest of pandas API. Previously, the `axis` and `labels`
arguments were respectively the first and second positional
arguments.
Parameters
----------
labels : list-like, Index
The values for the new index.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to update. The value 0 identifies the rows, and 1
identifies the columns.
inplace : bool, default False
If True, modify the %(klass)s in place and return None. Otherwise
return a new %(klass)s instance.
Returns
-------
renamed : %(klass)s or None
An object of same type as caller if inplace=False, None otherwise.
See Also
--------
DataFrame.rename_axis : Alter the name of the index or columns.
Examples
--------
**Series**
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.set_axis(['a', 'b', 'c'], axis=0)
a 1
b 2
c 3
dtype: int64
**DataFrame**
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
Change the row labels.
>>> df.set_axis(['a', 'b', 'c'], axis='index')
A B
a 1 4
b 2 5
c 3 6
Change the column labels.
>>> df.set_axis(['I', 'II'], axis='columns')
I II
0 1 4
1 2 5
2 3 6
Now, update the labels inplace.
>>> df.set_axis(['i', 'ii'], axis='columns', inplace=True)
>>> df
i ii
0 1 4
1 2 5
2 3 6
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and "labels" as second, is still supported '
"but will be deprecated in a future version of pandas.",
FutureWarning,
stacklevel=2,
)
labels, axis = axis, labels
if inplace:
setattr(self, self._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def _set_axis(self, axis, labels):
self._data.set_axis(axis, labels)
self._clear_item_cache()
def transpose(self, *args, **kwargs):
"""
Permute the dimensions of the %(klass)s
Parameters
----------
args : %(args_transpose)s
copy : bool, default False
Make a copy of the underlying data. Mixed-dtype data will
always result in a copy
**kwargs
Additional keyword arguments will be passed to the function.
Returns
-------
y : same as input
Examples
--------
>>> p.transpose(2, 0, 1)
>>> p.transpose(2, 0, 1, copy=True)
"""
# construct the args
axes, kwargs = self._construct_axes_from_arguments(
args, kwargs, require_all=True
)
axes_names = tuple(self._get_axis_name(axes[a]) for a in self._AXIS_ORDERS)
axes_numbers = tuple(self._get_axis_number(axes[a]) for a in self._AXIS_ORDERS)
# we must have unique axes
if len(axes) != len(set(axes)):
raise ValueError("Must specify %s unique axes" % self._AXIS_LEN)
new_axes = self._construct_axes_dict_from(
self, [self._get_axis(x) for x in axes_names]
)
new_values = self.values.transpose(axes_numbers)
if kwargs.pop("copy", None) or (len(args) and args[-1]):
new_values = new_values.copy()
nv.validate_transpose(tuple(), kwargs)
return self._constructor(new_values, **new_axes).__finalize__(self)
def swapaxes(self, axis1, axis2, copy=True):
"""
Interchange two axes and swap the values along them accordingly.
Returns
-------
y : same as input
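Examples
--------
A minimal illustrative sketch (output omitted; for a 2-D object this is
equivalent to a transpose):
>>> df = pd.DataFrame({"A": [1, 2], "B": [3, 4]})
>>> df.swapaxes("index", "columns")  # doctest: +SKIP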
"""
i = self._get_axis_number(axis1)
j = self._get_axis_number(axis2)
if i == j:
if copy:
return self.copy()
return self
mapping = {i: j, j: i}
new_axes = (self._get_axis(mapping.get(k, k)) for k in range(self._AXIS_LEN))
new_values = self.values.swapaxes(i, j)
if copy:
new_values = new_values.copy()
return self._constructor(new_values, *new_axes).__finalize__(self)
def droplevel(self, level, axis=0):
"""
Return DataFrame with requested index / column level(s) removed.
.. versionadded:: 0.24.0
Parameters
----------
level : int, str, or list-like
If a string is given, must be the name of a level
If list-like, elements must be names or positional indexes
of levels.
axis : {0 or 'index', 1 or 'columns'}, default 0
Returns
-------
DataFrame
DataFrame with the requested index / column level(s) removed.
Examples
--------
>>> df = pd.DataFrame([
... [1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12]
... ]).set_index([0, 1]).rename_axis(['a', 'b'])
>>> df.columns = pd.MultiIndex.from_tuples([
... ('c', 'e'), ('d', 'f')
... ], names=['level_1', 'level_2'])
>>> df
level_1 c d
level_2 e f
a b
1 2 3 4
5 6 7 8
9 10 11 12
>>> df.droplevel('a')
level_1 c d
level_2 e f
b
2 3 4
6 7 8
10 11 12
>>> df.droplevel('level_2', axis=1)
level_1 c d
a b
1 2 3 4
5 6 7 8
9 10 11 12
"""
labels = self._get_axis(axis)
new_labels = labels.droplevel(level)
result = self.set_axis(new_labels, axis=axis, inplace=False)
return result
def pop(self, item):
"""
Return item and drop from frame. Raise KeyError if not found.
Parameters
----------
item : str
Label of column to be popped.
Returns
-------
Series
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey','mammal', np.nan)],
... columns=('name', 'class', 'max_speed'))
>>> df
name class max_speed
0 falcon bird 389.0
1 parrot bird 24.0
2 lion mammal 80.5
3 monkey mammal NaN
>>> df.pop('class')
0 bird
1 bird
2 mammal
3 mammal
Name: class, dtype: object
>>> df
name max_speed
0 falcon 389.0
1 parrot 24.0
2 lion 80.5
3 monkey NaN
"""
result = self[item]
del self[item]
try:
result._reset_cacher()
except AttributeError:
pass
return result
def squeeze(self, axis=None):
"""
Squeeze 1 dimensional axis objects into scalars.
Series or DataFrames with a single element are squeezed to a scalar.
DataFrames with a single column or a single row are squeezed to a
Series. Otherwise the object is unchanged.
This method is most useful when you don't know if your
object is a Series or DataFrame, but you do know it has just a single
column. In that case you can safely call `squeeze` to ensure you have a
Series.
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default None
A specific axis to squeeze. By default, all length-1 axes are
squeezed.
.. versionadded:: 0.20.0
Returns
-------
DataFrame, Series, or scalar
The projection after squeezing `axis` or all the axes.
See Also
--------
Series.iloc : Integer-location based indexing for selecting scalars.
DataFrame.iloc : Integer-location based indexing for selecting Series.
Series.to_frame : Inverse of DataFrame.squeeze for a
single-column DataFrame.
Examples
--------
>>> primes = pd.Series([2, 3, 5, 7])
Slicing might produce a Series with a single value:
>>> even_primes = primes[primes % 2 == 0]
>>> even_primes
0 2
dtype: int64
>>> even_primes.squeeze()
2
Squeezing objects with more than one value in every axis does nothing:
>>> odd_primes = primes[primes % 2 == 1]
>>> odd_primes
1 3
2 5
3 7
dtype: int64
>>> odd_primes.squeeze()
1 3
2 5
3 7
dtype: int64
Squeezing is even more effective when used with DataFrames.
>>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b'])
>>> df
a b
0 1 2
1 3 4
Slicing a single column will produce a DataFrame with the columns
having only one value:
>>> df_a = df[['a']]
>>> df_a
a
0 1
1 3
So the columns can be squeezed down, resulting in a Series:
>>> df_a.squeeze('columns')
0 1
1 3
Name: a, dtype: int64
Slicing a single row from a single column will produce a single
scalar DataFrame:
>>> df_0a = df.loc[df.index < 1, ['a']]
>>> df_0a
a
0 1
Squeezing the rows produces a single scalar Series:
>>> df_0a.squeeze('rows')
a 1
Name: 0, dtype: int64
Squeezing all axes will project directly into a scalar:
>>> df_0a.squeeze()
1
"""
axis = self._AXIS_NAMES if axis is None else (self._get_axis_number(axis),)
try:
return self.iloc[
tuple(
0 if i in axis and len(a) == 1 else slice(None)
for i, a in enumerate(self.axes)
)
]
except Exception:
return self
def swaplevel(self, i=-2, j=-1, axis=0):
"""
Swap levels i and j in a MultiIndex on a particular axis
Parameters
----------
i, j : int, str (can be mixed)
Levels of the index to be swapped. Can pass level names as strings.
Returns
-------
swapped : same type as caller (new object)
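Examples
--------
Illustrative sketch only (assumes a two-level MultiIndex on the rows):
>>> mi = pd.MultiIndex.from_arrays([[1, 2], ["a", "b"]], names=["x", "y"])
>>> s = pd.Series([10, 20], index=mi)
>>> s.swaplevel("x", "y")  # doctest: +SKIP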
"""
axis = self._get_axis_number(axis)
result = self.copy()
labels = result._data.axes[axis]
result._data.set_axis(axis, labels.swaplevel(i, j))
return result
# ----------------------------------------------------------------------
# Rename
def rename(self, *args, **kwargs):
"""
Alter axes labels using an input function or mapping. Function / dict values must be
unique (1-to-1). Labels not contained in a dict / Series will be left
as-is. Extra labels listed don't throw an error. Alternatively, change
``Series.name`` with a scalar value (Series only).
Parameters
----------
%(axes)s : scalar, list-like, dict-like or function, optional
Scalar or list-like will alter the ``Series.name`` attribute,
and raise on DataFrame.
dict-like or functions are transformations to apply to
that axis' values
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Whether to return a new %(klass)s. If True then value of copy is
ignored.
level : int or level name, default None
In case of a MultiIndex, only rename labels in the specified
level.
errors : {'ignore', 'raise'}, default 'ignore'
If 'raise', raise a `KeyError` when a dict-like `mapper`, `index`,
or `columns` contains labels that are not present in the Index
being transformed.
If 'ignore', existing keys will be renamed and extra keys will be
ignored.
Returns
-------
renamed : %(klass)s (new object)
Raises
------
KeyError
If any of the labels is not found in the selected axis and
"errors='raise'".
See Also
--------
NDFrame.rename_axis
Examples
--------
>>> s = pd.Series([1, 2, 3])
>>> s
0 1
1 2
2 3
dtype: int64
>>> s.rename("my_name") # scalar, changes Series.name
0 1
1 2
2 3
Name: my_name, dtype: int64
>>> s.rename(lambda x: x ** 2) # function, changes labels
0 1
1 2
4 3
dtype: int64
>>> s.rename({1: 3, 2: 5}) # mapping, changes labels
0 1
3 2
5 3
dtype: int64
Since ``DataFrame`` doesn't have a ``.name`` attribute,
only mapping-type arguments are allowed.
>>> df = pd.DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})
>>> df.rename(2)
Traceback (most recent call last):
...
TypeError: 'int' object is not callable
``DataFrame.rename`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
>>> df.rename(index=str, columns={"A": "a", "B": "c"})
a c
0 1 4
1 2 5
2 3 6
>>> df.rename(index=str, columns={"A": "a", "C": "c"})
a B
0 1 4
1 2 5
2 3 6
Using axis-style parameters
>>> df.rename(str.lower, axis='columns')
a b
0 1 4
1 2 5
2 3 6
>>> df.rename({1: 2, 2: 4}, axis='index')
A B
0 1 4
2 2 5
4 3 6
See the :ref:`user guide <basics.rename>` for more.
"""
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
level = kwargs.pop("level", None)
axis = kwargs.pop("axis", None)
errors = kwargs.pop("errors", "ignore")
if axis is not None:
# Validate the axis
self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename() got an unexpected keyword "
'argument "{0}"'.format(list(kwargs.keys())[0])
)
if com.count_not_none(*axes.values()) == 0:
raise TypeError("must pass an index to rename")
self._consolidate_inplace()
result = self if inplace else self.copy(deep=copy)
# start in the axis order to eliminate too many copies
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is None:
continue
f = com.get_rename_function(v)
baxis = self._get_block_manager_axis(axis)
if level is not None:
level = self.axes[axis]._get_level_number(level)
# GH 13473
if not callable(v):
indexer = self.axes[axis].get_indexer_for(v)
if errors == "raise" and len(indexer[indexer == -1]):
missing_labels = [
label for index, label in enumerate(v) if indexer[index] == -1
]
raise KeyError("{} not found in axis".format(missing_labels))
result._data = result._data.rename_axis(
f, axis=baxis, copy=copy, level=level
)
result._clear_item_cache()
if inplace:
self._update_inplace(result._data)
else:
return result.__finalize__(self)
@rewrite_axis_style_signature("mapper", [("copy", True), ("inplace", False)])
def rename_axis(self, mapper=sentinel, **kwargs):
"""
Set the name of the axis for the index or columns.
Parameters
----------
mapper : scalar, list-like, optional
Value to set the axis name attribute.
index, columns : scalar, list-like, dict-like or function, optional
A scalar, list-like, dict-like or functions transformations to
apply to that axis' values.
Use either ``mapper`` and ``axis`` to
specify the axis to target with ``mapper``, or ``index``
and/or ``columns``.
.. versionchanged:: 0.24.0
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to rename.
copy : bool, default True
Also copy underlying data.
inplace : bool, default False
Modifies the object directly, instead of creating a new Series
or DataFrame.
Returns
-------
Series, DataFrame, or None
The same type as the caller or None if `inplace` is True.
See Also
--------
Series.rename : Alter Series index labels or name.
DataFrame.rename : Alter DataFrame index labels or name.
Index.rename : Set new names on index.
Notes
-----
``DataFrame.rename_axis`` supports two calling conventions
* ``(index=index_mapper, columns=columns_mapper, ...)``
* ``(mapper, axis={'index', 'columns'}, ...)``
The first calling convention will only modify the names of
the index and/or the names of the Index object that is the columns.
In this case, the parameter ``copy`` is ignored.
The second calling convention will modify the names of the
corresponding index if mapper is a list or a scalar.
However, if mapper is dict-like or a function, it will use the
deprecated behavior of modifying the axis *labels*.
We *highly* recommend using keyword arguments to clarify your
intent.
Examples
--------
**Series**
>>> s = pd.Series(["dog", "cat", "monkey"])
>>> s
0 dog
1 cat
2 monkey
dtype: object
>>> s.rename_axis("animal")
animal
0 dog
1 cat
2 monkey
dtype: object
**DataFrame**
>>> df = pd.DataFrame({"num_legs": [4, 4, 2],
... "num_arms": [0, 0, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs num_arms
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("animal")
>>> df
num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
>>> df = df.rename_axis("limbs", axis="columns")
>>> df
limbs num_legs num_arms
animal
dog 4 0
cat 4 0
monkey 2 2
**MultiIndex**
>>> df.index = pd.MultiIndex.from_product([['mammal'],
... ['dog', 'cat', 'monkey']],
... names=['type', 'name'])
>>> df
limbs num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(index={'type': 'class'})
limbs num_legs num_arms
class name
mammal dog 4 0
cat 4 0
monkey 2 2
>>> df.rename_axis(columns=str.upper)
LIMBS num_legs num_arms
type name
mammal dog 4 0
cat 4 0
monkey 2 2
"""
axes, kwargs = self._construct_axes_from_arguments(
(), kwargs, sentinel=sentinel
)
copy = kwargs.pop("copy", True)
inplace = kwargs.pop("inplace", False)
axis = kwargs.pop("axis", 0)
if axis is not None:
axis = self._get_axis_number(axis)
if kwargs:
raise TypeError(
"rename_axis() got an unexpected keyword "
'argument "{0}"'.format(list(kwargs.keys())[0])
)
inplace = validate_bool_kwarg(inplace, "inplace")
if mapper is not sentinel:
# Use v0.23 behavior if a scalar or list
non_mapper = is_scalar(mapper) or (
is_list_like(mapper) and not is_dict_like(mapper)
)
if non_mapper:
return self._set_axis_name(mapper, axis=axis, inplace=inplace)
else:
raise ValueError("Use `.rename` to alter labels with a mapper.")
else:
# Use new behavior. Means that index and/or columns
# is specified
result = self if inplace else self.copy(deep=copy)
for axis in range(self._AXIS_LEN):
v = axes.get(self._AXIS_NAMES[axis])
if v is sentinel:
continue
non_mapper = is_scalar(v) or (is_list_like(v) and not is_dict_like(v))
if non_mapper:
newnames = v
else:
f = com.get_rename_function(v)
curnames = self._get_axis(axis).names
newnames = [f(name) for name in curnames]
result._set_axis_name(newnames, axis=axis, inplace=True)
if not inplace:
return result
def _set_axis_name(self, name, axis=0, inplace=False):
"""
Set the name(s) of the axis.
Parameters
----------
name : str or list of str
Name(s) to set.
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis to set the label. The value 0 or 'index' specifies index,
and the value 1 or 'columns' specifies columns.
inplace : bool, default False
If `True`, do operation inplace and return None.
.. versionadded:: 0.21.0
Returns
-------
Series, DataFrame, or None
The same type as the caller or `None` if `inplace` is `True`.
See Also
--------
DataFrame.rename : Alter the axis labels of :class:`DataFrame`.
Series.rename : Alter the index labels or set the index name
of :class:`Series`.
Index.rename : Set the name of :class:`Index` or :class:`MultiIndex`.
Examples
--------
>>> df = pd.DataFrame({"num_legs": [4, 4, 2]},
... ["dog", "cat", "monkey"])
>>> df
num_legs
dog 4
cat 4
monkey 2
>>> df._set_axis_name("animal")
num_legs
animal
dog 4
cat 4
monkey 2
>>> df.index = pd.MultiIndex.from_product(
... [["mammal"], ['dog', 'cat', 'monkey']])
>>> df._set_axis_name(["type", "name"])
num_legs
type name
mammal dog 4
cat 4
monkey 2
"""
axis = self._get_axis_number(axis)
idx = self._get_axis(axis).set_names(name)
inplace = validate_bool_kwarg(inplace, "inplace")
renamed = self if inplace else self.copy()
renamed.set_axis(idx, axis=axis, inplace=True)
if not inplace:
return renamed
# ----------------------------------------------------------------------
# Comparison Methods
def _indexed_same(self, other):
return all(
self._get_axis(a).equals(other._get_axis(a)) for a in self._AXIS_ORDERS
)
def equals(self, other):
"""
Test whether two objects contain the same elements.
This function allows two Series or DataFrames to be compared against
each other to see if they have the same shape and elements. NaNs in
the same location are considered equal. The column headers do not
need to have the same type, but the elements within the columns must
be the same dtype.
Parameters
----------
other : Series or DataFrame
The other Series or DataFrame to be compared with the first.
Returns
-------
bool
True if all elements are the same in both objects, False
otherwise.
See Also
--------
Series.eq : Compare two Series objects of the same length
and return a Series where each element is True if the element
in each Series is equal, False otherwise.
DataFrame.eq : Compare two DataFrame objects of the same shape and
return a DataFrame where each element is True if the respective
element in each DataFrame is equal, False otherwise.
assert_series_equal : Raise an AssertionError if left and right Series
are not equal.
assert_frame_equal : Raise an AssertionError if left and right DataFrames
are not equal.
numpy.array_equal : Return True if two arrays have the same shape
and elements, False otherwise.
Notes
-----
This function requires that the elements have the same dtype as their
respective elements in the other Series or DataFrame. However, the
column labels do not need to have the same type, as long as they are
still considered equal.
Examples
--------
>>> df = pd.DataFrame({1: [10], 2: [20]})
>>> df
1 2
0 10 20
DataFrames df and exactly_equal have the same types and values for
their elements and column labels, which will return True.
>>> exactly_equal = pd.DataFrame({1: [10], 2: [20]})
>>> exactly_equal
1 2
0 10 20
>>> df.equals(exactly_equal)
True
DataFrames df and different_column_type have the same element
types and values, but have different types for the column labels,
which will still return True.
>>> different_column_type = pd.DataFrame({1.0: [10], 2.0: [20]})
>>> different_column_type
1.0 2.0
0 10 20
>>> df.equals(different_column_type)
True
DataFrames df and different_data_type have different types for the
same values for their elements, and will return False even though
their column labels are the same values and types.
>>> different_data_type = pd.DataFrame({1: [10.0], 2: [20.0]})
>>> different_data_type
1 2
0 10.0 20.0
>>> df.equals(different_data_type)
False
"""
if not isinstance(other, self._constructor):
return False
return self._data.equals(other._data)
# -------------------------------------------------------------------------
# Unary Methods
def __neg__(self):
values = com.values_from_object(self)
if is_bool_dtype(values):
arr = operator.inv(values)
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.neg(values)
else:
raise TypeError(
"Unary negative expects numeric dtype, not {}".format(values.dtype)
)
return self.__array_wrap__(arr)
def __pos__(self):
values = com.values_from_object(self)
if is_bool_dtype(values) or is_period_arraylike(values):
arr = values
elif (
is_numeric_dtype(values)
or is_timedelta64_dtype(values)
or is_object_dtype(values)
):
arr = operator.pos(values)
else:
raise TypeError(
"Unary plus expects numeric dtype, not {}".format(values.dtype)
)
return self.__array_wrap__(arr)
def __invert__(self):
try:
arr = operator.inv(com.values_from_object(self))
return self.__array_wrap__(arr)
except Exception:
# inv fails with 0 len
if not np.prod(self.shape):
return self
raise
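# Illustrative behaviour of the unary wrappers above:
#   -pd.Series([True, False])  -> Series([False, True])  (bool dtype is inverted)
#   -pd.Series([1, -2])        -> Series([-1, 2])
#   ~pd.Series([True, False])  -> Series([False, True])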
def __nonzero__(self):
raise ValueError(
"The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all().".format(
self.__class__.__name__
)
)
__bool__ = __nonzero__
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or if
that element is not boolean.
Returns
-------
bool
Same single boolean value converted to bool type.
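Examples
--------
The method only works on single-element boolean objects:
>>> pd.Series([True]).bool()
True
>>> pd.Series([False]).bool()
False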
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError(
"bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__)
)
self.__nonzero__()
def __abs__(self):
return self.abs()
def __round__(self, decimals=0):
return self.round(decimals)
# -------------------------------------------------------------------------
# Label or Level Combination Helpers
#
# A collection of helper methods for DataFrame/Series operations that
# accept a combination of column/index labels and levels. All such
# operations should utilize/extend these methods when possible so that we
# have consistent precedence and validation logic throughout the library.
def _is_level_reference(self, key, axis=0):
"""
Test whether a key is a level reference for a given axis.
To be considered a level reference, `key` must be a string that:
- (axis=0): Matches the name of an index level and does NOT match
a column label.
- (axis=1): Matches the name of a column level and does NOT match
an index label.
Parameters
----------
key : str
Potential level name for the given axis
axis : int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_level : bool
"""
axis = self._get_axis_number(axis)
return (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and not self._is_label_reference(key, axis=axis)
)
def _is_label_reference(self, key, axis=0):
"""
Test whether a key is a label reference for a given axis.
To be considered a label reference, `key` must be a string that:
- (axis=0): Matches a column label
- (axis=1): Matches an index label
Parameters
----------
key: str
Potential label name
axis: int, default 0
Axis perpendicular to the axis that labels are associated with
(0 means search for column labels, 1 means search for index labels)
Returns
-------
is_label: bool
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
return (
key is not None
and is_hashable(key)
and any(key in self.axes[ax] for ax in other_axes)
)
def _is_label_or_level_reference(self, key, axis=0):
"""
Test whether a key is a label or level reference for a given axis.
To be considered either a label or a level reference, `key` must be a
string that:
- (axis=0): Matches a column label or an index level
- (axis=1): Matches an index label or a column level
Parameters
----------
key: str
Potential label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
is_label_or_level: bool
"""
return self._is_level_reference(key, axis=axis) or self._is_label_reference(
key, axis=axis
)
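# Illustrative: given df = pd.DataFrame({"b": [1]}, index=pd.Index([0], name="a")),
# df._is_level_reference("a") and df._is_label_reference("b") are both True,
# while df._is_label_or_level_reference("c") is False.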
def _check_label_or_level_ambiguity(self, key, axis=0):
"""
Check whether `key` is ambiguous.
By ambiguous, we mean that it matches both a level of the input
`axis` and a label of the other axis.
Parameters
----------
key: str or object
label or level name
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Raises
------
ValueError: `key` is ambiguous
"""
axis = self._get_axis_number(axis)
other_axes = (ax for ax in range(self._AXIS_LEN) if ax != axis)
if (
key is not None
and is_hashable(key)
and key in self.axes[axis].names
and any(key in self.axes[ax] for ax in other_axes)
):
# Build an informative and grammatical warning
level_article, level_type = (
("an", "index") if axis == 0 else ("a", "column")
)
label_article, label_type = (
("a", "column") if axis == 0 else ("an", "index")
)
msg = (
"'{key}' is both {level_article} {level_type} level and "
"{label_article} {label_type} label, which is ambiguous."
).format(
key=key,
level_article=level_article,
level_type=level_type,
label_article=label_article,
label_type=label_type,
)
raise ValueError(msg)
def _get_label_or_level_values(self, key, axis=0):
"""
Return a 1-D array of values associated with `key`, a label or level
from the given `axis`.
Retrieval logic:
- (axis=0): Return column values if `key` matches a column label.
Otherwise return index level values if `key` matches an index
level.
- (axis=1): Return row values if `key` matches an index label.
Otherwise return column level values if 'key' matches a column
level
Parameters
----------
key: str
Label or level name.
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
values: np.ndarray
Raises
------
KeyError
if `key` matches neither a label nor a level
ValueError
if `key` matches multiple labels
FutureWarning
if `key` is ambiguous. This will become an ambiguity error in a
future version
"""
axis = self._get_axis_number(axis)
other_axes = [ax for ax in range(self._AXIS_LEN) if ax != axis]
if self._is_label_reference(key, axis=axis):
self._check_label_or_level_ambiguity(key, axis=axis)
values = self.xs(key, axis=other_axes[0])._values
elif self._is_level_reference(key, axis=axis):
values = self.axes[axis].get_level_values(key)._values
else:
raise KeyError(key)
# Check for duplicates
if values.ndim > 1:
if other_axes and isinstance(self._get_axis(other_axes[0]), MultiIndex):
multi_message = (
"\n"
"For a multi-index, the label must be a "
"tuple with elements corresponding to "
"each level."
)
else:
multi_message = ""
label_axis_name = "column" if axis == 0 else "index"
raise ValueError(
(
"The {label_axis_name} label '{key}' "
"is not unique.{multi_message}"
).format(
key=key,
label_axis_name=label_axis_name,
multi_message=multi_message,
)
)
return values
def _drop_labels_or_levels(self, keys, axis=0):
"""
Drop labels and/or levels for the given `axis`.
For each key in `keys`:
- (axis=0): If key matches a column label then drop the column.
Otherwise if key matches an index level then drop the level.
- (axis=1): If key matches an index label then drop the row.
Otherwise if key matches a column level then drop the level.
Parameters
----------
keys: str or list of str
labels or levels to drop
axis: int, default 0
Axis that levels are associated with (0 for index, 1 for columns)
Returns
-------
dropped: DataFrame
Raises
------
ValueError
if any `keys` match neither a label nor a level
"""
axis = self._get_axis_number(axis)
# Validate keys
keys = com.maybe_make_list(keys)
invalid_keys = [
k for k in keys if not self._is_label_or_level_reference(k, axis=axis)
]
if invalid_keys:
raise ValueError(
(
"The following keys are not valid labels or "
"levels for axis {axis}: {invalid_keys}"
).format(axis=axis, invalid_keys=invalid_keys)
)
# Compute levels and labels to drop
levels_to_drop = [k for k in keys if self._is_level_reference(k, axis=axis)]
labels_to_drop = [k for k in keys if not self._is_level_reference(k, axis=axis)]
# Perform copy upfront and then use inplace operations below.
# This ensures that we always perform exactly one copy.
# ``copy`` and/or ``inplace`` options could be added in the future.
dropped = self.copy()
if axis == 0:
# Handle dropping index levels
if levels_to_drop:
dropped.reset_index(levels_to_drop, drop=True, inplace=True)
# Handle dropping columns labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=1, inplace=True)
else:
# Handle dropping column levels
if levels_to_drop:
if isinstance(dropped.columns, MultiIndex):
# Drop the specified levels from the MultiIndex
dropped.columns = dropped.columns.droplevel(levels_to_drop)
else:
# Drop the last level of Index by replacing with
# a RangeIndex
dropped.columns = RangeIndex(dropped.columns.size)
# Handle dropping index labels
if labels_to_drop:
dropped.drop(labels_to_drop, axis=0, inplace=True)
return dropped
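# Illustrative: for a DataFrame indexed by a level named 'year' and carrying a
# column 'price', df._drop_labels_or_levels(['year', 'price']) is expected to
# reset/drop the 'year' index level and drop the 'price' column in one pass.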
# ----------------------------------------------------------------------
# Iteration
def __hash__(self):
raise TypeError(
"{0!r} objects are mutable, thus they cannot be"
" hashed".format(self.__class__.__name__)
)
def __iter__(self):
"""
Iterate over info axis.
Returns
-------
iterator
Info axis as iterator.
"""
return iter(self._info_axis)
# can we get a better explanation of this?
def keys(self):
"""
Get the 'info axis' (see Indexing for more).
This is index for Series, columns for DataFrame.
Returns
-------
Index
Info axis.
"""
return self._info_axis
def items(self):
"""Iterate over (label, values) on info axis
This is index for Series and columns for DataFrame.
Returns
-------
Generator
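Examples
--------
For a DataFrame this yields ``(column name, column)`` pairs:
>>> df = pd.DataFrame({"species": ["bear", "wolf"], "population": [1864, 22000]})
>>> for label, content in df.items():
...     print(label)
species
population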
"""
for h in self._info_axis:
yield h, self[h]
@Appender(items.__doc__)
def iteritems(self):
return self.items()
def __len__(self):
"""Returns length of info axis"""
return len(self._info_axis)
def __contains__(self, key):
"""True if the key is in the info axis"""
return key in self._info_axis
@property
def empty(self):
"""
Indicator whether DataFrame is empty.
True if DataFrame is entirely empty (no items), meaning any of the
axes are of length 0.
Returns
-------
bool
If DataFrame is empty, return True; if not, return False.
See Also
--------
Series.dropna
DataFrame.dropna
Notes
-----
If DataFrame contains only NaNs, it is still not considered empty. See
the example below.
Examples
--------
An example of an actual empty DataFrame. Notice the index is empty:
>>> df_empty = pd.DataFrame({'A' : []})
>>> df_empty
Empty DataFrame
Columns: [A]
Index: []
>>> df_empty.empty
True
If we only have NaNs in our DataFrame, it is not considered empty! We
will need to drop the NaNs to make the DataFrame empty:
>>> df = pd.DataFrame({'A' : [np.nan]})
>>> df
A
0 NaN
>>> df.empty
False
>>> df.dropna().empty
True
"""
return any(len(self._get_axis(a)) == 0 for a in self._AXIS_ORDERS)
# ----------------------------------------------------------------------
# Array Interface
# This is also set in IndexOpsMixin
# GH#23114 Ensure ndarray.__op__(DataFrame) returns NotImplemented
__array_priority__ = 1000
def __array__(self, dtype=None):
return com.values_from_object(self)
def __array_wrap__(self, result, context=None):
d = self._construct_axes_dict(self._AXIS_ORDERS, copy=False)
return self._constructor(result, **d).__finalize__(self)
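# Illustrative: np.asarray(df) goes through __array__ and returns a plain
# ndarray of the underlying values, while a ufunc such as np.sqrt(series)
# re-wraps its result via __array_wrap__ so that the index is preserved.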
# ideally we would define this to avoid the getattr checks, but
# is slower
# @property
# def __array_interface__(self):
# """ provide numpy array interface method """
# values = self.values
# return dict(typestr=values.dtype.str,shape=values.shape,data=values)
def to_dense(self):
"""
Return dense representation of Series/DataFrame (as opposed to sparse).
.. deprecated:: 0.25.0
Returns
-------
%(klass)s
Dense %(klass)s.
"""
warnings.warn(
"DataFrame/Series.to_dense is deprecated "
"and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
# compat
return self
# ----------------------------------------------------------------------
# Picklability
def __getstate__(self):
meta = {k: getattr(self, k, None) for k in self._metadata}
return dict(_data=self._data, _typ=self._typ, _metadata=self._metadata, **meta)
def __setstate__(self, state):
if isinstance(state, BlockManager):
self._data = state
elif isinstance(state, dict):
typ = state.get("_typ")
if typ is not None:
# set in the order of internal names
# to avoid definitional recursion
# e.g. say fill_value needing _data to be
# defined
meta = set(self._internal_names + self._metadata)
for k in list(meta):
if k in state:
v = state[k]
object.__setattr__(self, k, v)
for k, v in state.items():
if k not in meta:
object.__setattr__(self, k, v)
else:
self._unpickle_series_compat(state)
elif isinstance(state[0], dict):
if len(state) == 5:
self._unpickle_sparse_frame_compat(state)
else:
self._unpickle_frame_compat(state)
elif len(state) == 4:
self._unpickle_panel_compat(state)
elif len(state) == 2:
self._unpickle_series_compat(state)
else: # pragma: no cover
# old pickling format, for compatibility
self._unpickle_matrix_compat(state)
self._item_cache = {}
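# Illustrative round trip relying on the two hooks above (sketch only):
#   import pickle
#   restored = pickle.loads(pickle.dumps(df))
#   assert df.equals(restored)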
# ----------------------------------------------------------------------
# Rendering Methods
def __repr__(self):
# string representation based upon iterating over self
# (since, by definition, `PandasContainers` are iterable)
prepr = "[%s]" % ",".join(map(pprint_thing, self))
return "%s(%s)" % (self.__class__.__name__, prepr)
def _repr_latex_(self):
"""
Returns a LaTeX representation for a particular object.
Mainly for use with nbconvert (jupyter notebook conversion to pdf).
"""
if config.get_option("display.latex.repr"):
return self.to_latex()
else:
return None
def _repr_data_resource_(self):
"""
Not a real Jupyter special repr method, but we use the same
naming convention.
"""
if config.get_option("display.html.table_schema"):
data = self.head(config.get_option("display.max_rows"))
payload = json.loads(
data.to_json(orient="table"), object_pairs_hook=collections.OrderedDict
)
return payload
# ----------------------------------------------------------------------
# I/O Methods
_shared_docs[
"to_excel"
] = """
Write %(klass)s to an Excel sheet.
To write a single %(klass)s to an Excel .xlsx file it is only necessary to
specify a target file name. To write to multiple sheets it is necessary to
create an `ExcelWriter` object with a target file name, and specify a sheet
in the file to write to.
Multiple sheets may be written to by specifying unique `sheet_name`.
With all data written to the file it is necessary to save the changes.
Note that creating an `ExcelWriter` object with a file name that already
exists will result in the contents of the existing file being erased.
Parameters
----------
excel_writer : str or ExcelWriter object
File path or existing ExcelWriter.
sheet_name : str, default 'Sheet1'
Name of sheet which will contain DataFrame.
na_rep : str, default ''
Missing data representation.
float_format : str, optional
Format string for floating point numbers. For example
``float_format="%%.2f"`` will format 0.1234 to 0.12.
columns : sequence or list of str, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
index_label : str or sequence, optional
Column label for index column(s) if desired. If not specified, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the DataFrame uses MultiIndex.
startrow : int, default 0
Upper left cell row to dump data frame.
startcol : int, default 0
Upper left cell column to dump data frame.
engine : str, optional
Write engine to use, 'openpyxl' or 'xlsxwriter'. You can also set this
via the options ``io.excel.xlsx.writer``, ``io.excel.xls.writer``, and
``io.excel.xlsm.writer``.
merge_cells : bool, default True
Write MultiIndex and Hierarchical Rows as merged cells.
encoding : str, optional
Encoding of the resulting excel file. Only necessary for xlwt,
other writers support unicode natively.
inf_rep : str, default 'inf'
Representation for infinity (there is no native representation for
infinity in Excel).
verbose : bool, default True
Display more information in the error logs.
freeze_panes : tuple of int (length 2), optional
Specifies the one-based bottommost row and rightmost column that
is to be frozen.
.. versionadded:: 0.20.0.
See Also
--------
to_csv : Write DataFrame to a comma-separated values (csv) file.
ExcelWriter : Class for writing DataFrame objects into excel sheets.
read_excel : Read an Excel file into a pandas DataFrame.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Notes
-----
For compatibility with :meth:`~DataFrame.to_csv`,
to_excel serializes lists and dicts to strings before writing.
Once a workbook has been saved it is not possible to write further data
without rewriting the whole workbook.
Examples
--------
Create, write to and save a workbook:
>>> df1 = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df1.to_excel("output.xlsx") # doctest: +SKIP
To specify the sheet name:
>>> df1.to_excel("output.xlsx",
... sheet_name='Sheet_name_1') # doctest: +SKIP
If you wish to write to more than one sheet in the workbook, it is
necessary to specify an ExcelWriter object:
>>> df2 = df1.copy()
>>> with pd.ExcelWriter('output.xlsx') as writer: # doctest: +SKIP
... df1.to_excel(writer, sheet_name='Sheet_name_1')
... df2.to_excel(writer, sheet_name='Sheet_name_2')
ExcelWriter can also be used to append to an existing Excel file:
>>> with pd.ExcelWriter('output.xlsx',
... mode='a') as writer: # doctest: +SKIP
... df.to_excel(writer, sheet_name='Sheet_name_3')
To set the library that is used to write the Excel file,
you can pass the `engine` keyword (the default engine is
automatically chosen depending on the file extension):
>>> df1.to_excel('output1.xlsx', engine='xlsxwriter') # doctest: +SKIP
"""
@Appender(_shared_docs["to_excel"] % dict(klass="object"))
def to_excel(
self,
excel_writer,
sheet_name="Sheet1",
na_rep="",
float_format=None,
columns=None,
header=True,
index=True,
index_label=None,
startrow=0,
startcol=0,
engine=None,
merge_cells=True,
encoding=None,
inf_rep="inf",
verbose=True,
freeze_panes=None,
):
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.excel import ExcelFormatter
formatter = ExcelFormatter(
df,
na_rep=na_rep,
cols=columns,
header=header,
float_format=float_format,
index=index,
index_label=index_label,
merge_cells=merge_cells,
inf_rep=inf_rep,
)
formatter.write(
excel_writer,
sheet_name=sheet_name,
startrow=startrow,
startcol=startcol,
freeze_panes=freeze_panes,
engine=engine,
)
def to_json(
self,
path_or_buf=None,
orient=None,
date_format=None,
double_precision=10,
force_ascii=True,
date_unit="ms",
default_handler=None,
lines=False,
compression="infer",
index=True,
):
"""
Convert the object to a JSON string.
Note NaN's and None will be converted to null and datetime objects
will be converted to UNIX timestamps.
Parameters
----------
path_or_buf : str or file handle, optional
File path or object. If not specified, the result is returned as
a string.
orient : str
Indication of expected JSON string format.
* Series
- default is 'index'
- allowed values are: {'split','records','index','table'}
* DataFrame
- default is 'columns'
- allowed values are:
{'split','records','index','columns','values','table'}
* The format of the JSON string
- 'split' : dict like {'index' -> [index],
'columns' -> [columns], 'data' -> [values]}
- 'records' : list like
[{column -> value}, ... , {column -> value}]
- 'index' : dict like {index -> {column -> value}}
- 'columns' : dict like {column -> {index -> value}}
- 'values' : just the values array
- 'table' : dict like {'schema': {schema}, 'data': {data}}
describing the data, and the data component is
like ``orient='records'``.
.. versionchanged:: 0.20.0
date_format : {None, 'epoch', 'iso'}
Type of date conversion. 'epoch' = epoch milliseconds,
'iso' = ISO8601. The default depends on the `orient`. For
``orient='table'``, the default is 'iso'. For all other orients,
the default is 'epoch'.
double_precision : int, default 10
The number of decimal places to use when encoding
floating point values.
force_ascii : bool, default True
Force encoded string to be ASCII.
date_unit : string, default 'ms' (milliseconds)
The time unit to encode to, governs timestamp and ISO8601
precision. One of 's', 'ms', 'us', 'ns' for second, millisecond,
microsecond, and nanosecond respectively.
default_handler : callable, default None
Handler to call if object cannot otherwise be converted to a
suitable format for JSON. Should receive a single argument which is
the object to convert and return a serialisable object.
lines : bool, default False
If 'orient' is 'records', write out line-delimited JSON format.
Raises ValueError for any other 'orient', since the other formats are
not list-like.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}
A string representing the compression to use in the output file,
only used when the first argument is a filename. By default, the
compression is inferred from the filename.
.. versionadded:: 0.21.0
.. versionchanged:: 0.24.0
'infer' option added and set to default
index : bool, default True
Whether to include the index values in the JSON string. Not
including the index (``index=False``) is only supported when
orient is 'split' or 'table'.
.. versionadded:: 0.23.0
Returns
-------
None or str
If path_or_buf is None, returns the resulting json format as a
string. Otherwise returns None.
See Also
--------
read_json
Examples
--------
>>> df = pd.DataFrame([['a', 'b'], ['c', 'd']],
... index=['row 1', 'row 2'],
... columns=['col 1', 'col 2'])
>>> df.to_json(orient='split')
'{"columns":["col 1","col 2"],
"index":["row 1","row 2"],
"data":[["a","b"],["c","d"]]}'
Encoding/decoding a Dataframe using ``'records'`` formatted JSON.
Note that index labels are not preserved with this encoding.
>>> df.to_json(orient='records')
'[{"col 1":"a","col 2":"b"},{"col 1":"c","col 2":"d"}]'
Encoding/decoding a Dataframe using ``'index'`` formatted JSON:
>>> df.to_json(orient='index')
'{"row 1":{"col 1":"a","col 2":"b"},"row 2":{"col 1":"c","col 2":"d"}}'
Encoding/decoding a Dataframe using ``'columns'`` formatted JSON:
>>> df.to_json(orient='columns')
'{"col 1":{"row 1":"a","row 2":"c"},"col 2":{"row 1":"b","row 2":"d"}}'
Encoding/decoding a Dataframe using ``'values'`` formatted JSON:
>>> df.to_json(orient='values')
'[["a","b"],["c","d"]]'
Encoding with Table Schema
>>> df.to_json(orient='table')
'{"schema": {"fields": [{"name": "index", "type": "string"},
{"name": "col 1", "type": "string"},
{"name": "col 2", "type": "string"}],
"primaryKey": "index",
"pandas_version": "0.20.0"},
"data": [{"index": "row 1", "col 1": "a", "col 2": "b"},
{"index": "row 2", "col 1": "c", "col 2": "d"}]}'
"""
from pandas.io import json
if date_format is None and orient == "table":
date_format = "iso"
elif date_format is None:
date_format = "epoch"
return json.to_json(
path_or_buf=path_or_buf,
obj=self,
orient=orient,
date_format=date_format,
double_precision=double_precision,
force_ascii=force_ascii,
date_unit=date_unit,
default_handler=default_handler,
lines=lines,
compression=compression,
index=index,
)
def to_hdf(self, path_or_buf, key, **kwargs):
"""
Write the contained data to an HDF5 file using HDFStore.
Hierarchical Data Format (HDF) is self-describing, allowing an
application to interpret the structure and contents of a file with
no outside information. One HDF file can hold a mix of related objects
which can be accessed as a group or as individual objects.
In order to add another DataFrame or Series to an existing HDF file
please use append mode and a different key.
For more information see the :ref:`user guide <io.hdf5>`.
Parameters
----------
path_or_buf : str or pandas.HDFStore
File path or HDFStore object.
key : str
Identifier for the group in the store.
mode : {'a', 'w', 'r+'}, default 'a'
Mode to open file:
- 'w': write, a new file is created (an existing file with
the same name would be deleted).
- 'a': append, an existing file is opened for reading and
writing, and if the file does not exist it is created.
- 'r+': similar to 'a', but the file must already exist.
format : {'fixed', 'table'}, default 'fixed'
Possible values:
- 'fixed': Fixed format. Fast writing/reading. Not-appendable,
nor searchable.
- 'table': Table format. Write as a PyTables Table structure
which may perform worse but allow more flexible operations
like searching / selecting subsets of the data.
append : bool, default False
For Table formats, append the input data to the existing.
data_columns : list of columns or True, optional
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See :ref:`io.hdf5-query-data-columns`.
Applicable only to format='table'.
complevel : {0-9}, optional
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc'}, default 'zlib'
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available raises
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum.
dropna : bool, default False
If True, rows whose values are all NaN are not written to the store.
errors : str, default 'strict'
Specifies how encoding and decoding errors are to be handled.
See the errors argument for :func:`open` for a full list
of options.
See Also
--------
DataFrame.read_hdf : Read from HDF file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
DataFrame.to_sql : Write to a sql table.
DataFrame.to_feather : Write out feather-format for DataFrames.
DataFrame.to_csv : Write out to a csv file.
Examples
--------
>>> df = pd.DataFrame({'A': [1, 2, 3], 'B': [4, 5, 6]},
... index=['a', 'b', 'c'])
>>> df.to_hdf('data.h5', key='df', mode='w')
We can add another object to the same file:
>>> s = pd.Series([1, 2, 3, 4])
>>> s.to_hdf('data.h5', key='s')
Reading from HDF file:
>>> pd.read_hdf('data.h5', 'df')
A B
a 1 4
b 2 5
c 3 6
>>> pd.read_hdf('data.h5', 's')
0 1
1 2
2 3
3 4
dtype: int64
Deleting file with data:
>>> import os
>>> os.remove('data.h5')
"""
from pandas.io import pytables
pytables.to_hdf(path_or_buf, key, self, **kwargs)
def to_msgpack(self, path_or_buf=None, encoding="utf-8", **kwargs):
"""
Serialize object to input file path using msgpack format.
.. deprecated:: 0.25.0
to_msgpack is deprecated and will be removed in a future version.
It is recommended to use pyarrow for on-the-wire transmission of
pandas objects.
Parameters
----------
path_or_buf : str, file path, buffer-like, or None
if None, return generated bytes
append : bool whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
Returns
-------
None or bytes
If path_or_buf is None, returns the resulting msgpack format as a
byte string. Otherwise returns None.
"""
from pandas.io import packers
return packers.to_msgpack(path_or_buf, self, encoding=encoding, **kwargs)
def to_sql(
self,
name: str,
con,
schema=None,
if_exists="fail",
index=True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Databases supported by SQLAlchemy [1]_ are supported. Tables can be
newly created, appended to, or overwritten.
Parameters
----------
name : str
Name of SQL table.
con : sqlalchemy.engine.Engine or sqlite3.Connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. Legacy support is provided for sqlite3.Connection objects.
schema : str, optional
Specify the schema (if database flavor supports this). If None, use
default schema.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
How to behave if the table already exists.
* fail: Raise a ValueError.
* replace: Drop the table before inserting new values.
* append: Insert new values to the existing table.
index : bool, default True
Write DataFrame index as a column. Uses `index_label` as the column
name in the table.
index_label : str or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 legacy mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
Raises
------
ValueError
When the table already exists and `if_exists` is 'fail' (the
default).
See Also
--------
read_sql : Read a DataFrame from a table.
Notes
-----
Timezone aware datetime columns will be written as
``Timestamp with timezone`` type with SQLAlchemy if supported by the
database. Otherwise, the datetimes will be stored as timezone unaware
timestamps local to the original timezone.
.. versionadded:: 0.24.0
References
----------
.. [1] http://docs.sqlalchemy.org
.. [2] https://www.python.org/dev/peps/pep-0249/
Examples
--------
Create an in-memory SQLite database.
>>> from sqlalchemy import create_engine
>>> engine = create_engine('sqlite://', echo=False)
Create a table from scratch with 3 rows.
>>> df = pd.DataFrame({'name' : ['User 1', 'User 2', 'User 3']})
>>> df
name
0 User 1
1 User 2
2 User 3
>>> df.to_sql('users', con=engine)
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3')]
>>> df1 = pd.DataFrame({'name' : ['User 4', 'User 5']})
>>> df1.to_sql('users', con=engine, if_exists='append')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 1'), (1, 'User 2'), (2, 'User 3'),
(0, 'User 4'), (1, 'User 5')]
Overwrite the table with just ``df1``.
>>> df1.to_sql('users', con=engine, if_exists='replace',
... index_label='id')
>>> engine.execute("SELECT * FROM users").fetchall()
[(0, 'User 4'), (1, 'User 5')]
Specify the dtype (especially useful for integers with missing values).
Notice that while pandas is forced to store the data as floating point,
the database supports nullable integers. When fetching the data with
Python, we get back integer scalars.
>>> df = pd.DataFrame({"A": [1, None, 2]})
>>> df
A
0 1.0
1 NaN
2 2.0
>>> from sqlalchemy.types import Integer
>>> df.to_sql('integers', con=engine, index=False,
... dtype={"A": Integer()})
>>> engine.execute("SELECT * FROM integers").fetchall()
[(1,), (None,), (2,)]
"""
from pandas.io import sql
sql.to_sql(
self,
name,
con,
schema=schema,
if_exists=if_exists,
index=index,
index_label=index_label,
chunksize=chunksize,
dtype=dtype,
method=method,
)
def to_pickle(self, path, compression="infer", protocol=pickle.HIGHEST_PROTOCOL):
"""
Pickle (serialize) object to file.
Parameters
----------
path : str
File path where the pickled object will be stored.
compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, \
default 'infer'
A string representing the compression to use in the output file. By
default, infers from the file extension in specified path.
.. versionadded:: 0.20.0
protocol : int
Int which indicates which protocol should be used by the pickler,
default HIGHEST_PROTOCOL (see [1]_ paragraph 12.1.2). The possible
values are 0, 1, 2, 3, 4. A negative value for the protocol
parameter is equivalent to setting its value to HIGHEST_PROTOCOL.
.. [1] https://docs.python.org/3/library/pickle.html
.. versionadded:: 0.21.0
See Also
--------
read_pickle : Load pickled pandas object (or any object) from file.
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_sql : Write DataFrame to a SQL database.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Examples
--------
>>> original_df = pd.DataFrame({"foo": range(5), "bar": range(5, 10)})
>>> original_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> original_df.to_pickle("./dummy.pkl")
>>> unpickled_df = pd.read_pickle("./dummy.pkl")
>>> unpickled_df
foo bar
0 0 5
1 1 6
2 2 7
3 3 8
4 4 9
>>> import os
>>> os.remove("./dummy.pkl")
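A compressed pickle can be written the same way; with the default
``compression='infer'`` the mode is inferred from the file extension
(illustrative sketch, file names are only examples):
>>> original_df.to_pickle("./dummy.pkl.gz")  # doctest: +SKIP
>>> pd.read_pickle("./dummy.pkl.gz")  # doctest: +SKIP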
"""
from pandas.io.pickle import to_pickle
to_pickle(self, path, compression=compression, protocol=protocol)
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to ``False``.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs)
def to_xarray(self):
"""
Return an xarray object from the pandas object.
Returns
-------
xarray.DataArray or xarray.Dataset
Data in the pandas structure converted to Dataset if the object is
a DataFrame, or a DataArray if the object is a Series.
See Also
--------
DataFrame.to_hdf : Write DataFrame to an HDF5 file.
DataFrame.to_parquet : Write a DataFrame to the binary parquet format.
Notes
-----
See the `xarray docs <http://xarray.pydata.org/en/stable/>`__
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0, 2),
... ('parrot', 'bird', 24.0, 2),
... ('lion', 'mammal', 80.5, 4),
... ('monkey', 'mammal', np.nan, 4)],
... columns=['name', 'class', 'max_speed',
... 'num_legs'])
>>> df
name class max_speed num_legs
0 falcon bird 389.0 2
1 parrot bird 24.0 2
2 lion mammal 80.5 4
3 monkey mammal NaN 4
>>> df.to_xarray()
<xarray.Dataset>
Dimensions: (index: 4)
Coordinates:
* index (index) int64 0 1 2 3
Data variables:
name (index) object 'falcon' 'parrot' 'lion' 'monkey'
class (index) object 'bird' 'bird' 'mammal' 'mammal'
max_speed (index) float64 389.0 24.0 80.5 nan
num_legs (index) int64 2 2 4 4
>>> df['max_speed'].to_xarray()
<xarray.DataArray 'max_speed' (index: 4)>
array([389. , 24. , 80.5, nan])
Coordinates:
* index (index) int64 0 1 2 3
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-01',
... '2018-01-02', '2018-01-02'])
>>> df_multiindex = pd.DataFrame({'date': dates,
... 'animal': ['falcon', 'parrot', 'falcon',
... 'parrot'],
... 'speed': [350, 18, 361, 15]}).set_index(['date',
... 'animal'])
>>> df_multiindex
speed
date animal
2018-01-01 falcon 350
parrot 18
2018-01-02 falcon 361
parrot 15
>>> df_multiindex.to_xarray()
<xarray.Dataset>
Dimensions: (animal: 2, date: 2)
Coordinates:
* date (date) datetime64[ns] 2018-01-01 2018-01-02
* animal (animal) object 'falcon' 'parrot'
Data variables:
speed (date, animal) int64 350 18 361 15
"""
xarray = import_optional_dependency("xarray")
if self.ndim == 1:
return xarray.DataArray.from_series(self)
else:
return xarray.Dataset.from_dataframe(self)
@Substitution(returns=fmt.return_docstring)
def to_latex(
self,
buf=None,
columns=None,
col_space=None,
header=True,
index=True,
na_rep="NaN",
formatters=None,
float_format=None,
sparsify=None,
index_names=True,
bold_rows=False,
column_format=None,
longtable=None,
escape=None,
encoding=None,
decimal=".",
multicolumn=None,
multicolumn_format=None,
multirow=None,
):
r"""
Render an object to a LaTeX tabular environment table.
Render an object to a tabular environment table. You can splice
this into a LaTeX document. Requires \usepackage{booktabs}.
.. versionchanged:: 0.20.2
Added to Series
Parameters
----------
buf : str, Path or StringIO-like, optional, default None
Buffer to write to. If None, the output is returned as a string.
columns : list of label, optional
The subset of columns to write. Writes all columns by default.
col_space : int, optional
The minimum width of each column.
header : bool or list of str, default True
Write out the column names. If a list of strings is given,
it is assumed to be aliases for the column names.
index : bool, default True
Write row names (index).
na_rep : str, default 'NaN'
Missing data representation.
formatters : list of functions or dict of {str: function}, optional
Formatter functions to apply to columns' elements by position or
name. The result of each function must be a unicode string.
List must be of length equal to the number of columns.
float_format : one-parameter function or str, optional, default None
Formatter for floating point numbers. For example
``float_format="%%.2f"`` and ``float_format="{:0.2f}".format`` will
both result in 0.1234 being formatted as 0.12.
sparsify : bool, optional
Set to False for a DataFrame with a hierarchical index to print
every multiindex key at each row. By default, the value will be
read from the config module.
index_names : bool, default True
Prints the names of the indexes.
bold_rows : bool, default False
Make the row labels bold in the output.
column_format : str, optional
The columns format as specified in `LaTeX table format
<https://en.wikibooks.org/wiki/LaTeX/Tables>`__ e.g. 'rcl' for 3
columns. By default, 'l' will be used for all columns except
columns of numbers, which default to 'r'.
longtable : bool, optional
By default, the value will be read from the pandas config
module. Use a longtable environment instead of tabular. Requires
adding a \usepackage{longtable} to your LaTeX preamble.
escape : bool, optional
By default, the value will be read from the pandas config
module. When set to False prevents from escaping latex special
characters in column names.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
decimal : str, default '.'
Character recognized as decimal separator, e.g. ',' in Europe.
multicolumn : bool, default True
Use \multicolumn to enhance MultiIndex columns.
The default will be read from the config module.
.. versionadded:: 0.20.0
multicolumn_format : str, default 'l'
The alignment for multicolumns, similar to `column_format`
The default will be read from the config module.
.. versionadded:: 0.20.0
multirow : bool, default False
Use \multirow to enhance MultiIndex rows. Requires adding a
\usepackage{multirow} to your LaTeX preamble. Will print
centered labels (instead of top-aligned) across the contained
rows, separating groups via clines. The default will be read
from the pandas config module.
.. versionadded:: 0.20.0
%(returns)s
See Also
--------
DataFrame.to_string : Render a DataFrame to a console-friendly
tabular output.
DataFrame.to_html : Render a DataFrame as an HTML table.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> print(df.to_latex(index=False)) # doctest: +NORMALIZE_WHITESPACE
\begin{tabular}{lll}
\toprule
name & mask & weapon \\
\midrule
Raphael & red & sai \\
Donatello & purple & bo staff \\
\bottomrule
\end{tabular}
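As an illustrative variation (not part of the original example), the column
alignment can be forced with ``column_format`` and LaTeX escaping of the
cell contents disabled with ``escape=False``:
>>> print(df.to_latex(index=False, column_format='rcl',
...                   escape=False))  # doctest: +SKIP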
"""
# Get defaults from the pandas config
if self.ndim == 1:
self = self.to_frame()
if longtable is None:
longtable = config.get_option("display.latex.longtable")
if escape is None:
escape = config.get_option("display.latex.escape")
if multicolumn is None:
multicolumn = config.get_option("display.latex.multicolumn")
if multicolumn_format is None:
multicolumn_format = config.get_option("display.latex.multicolumn_format")
if multirow is None:
multirow = config.get_option("display.latex.multirow")
formatter = DataFrameFormatter(
self,
columns=columns,
col_space=col_space,
na_rep=na_rep,
header=header,
index=index,
formatters=formatters,
float_format=float_format,
bold_rows=bold_rows,
sparsify=sparsify,
index_names=index_names,
escape=escape,
decimal=decimal,
)
return formatter.to_latex(
buf=buf,
column_format=column_format,
longtable=longtable,
encoding=encoding,
multicolumn=multicolumn,
multicolumn_format=multicolumn_format,
multirow=multirow,
)
def to_csv(
self,
path_or_buf: Optional[FilePathOrBuffer] = None,
sep: str = ",",
na_rep: str = "",
float_format: Optional[str] = None,
columns: Optional[Sequence[Hashable]] = None,
header: Union[bool_t, List[str]] = True,
index: bool_t = True,
index_label: Optional[Union[bool_t, str, Sequence[Hashable]]] = None,
mode: str = "w",
encoding: Optional[str] = None,
compression: Optional[Union[str, Dict[str, str]]] = "infer",
quoting: Optional[int] = None,
quotechar: str = '"',
line_terminator: Optional[str] = None,
chunksize: Optional[int] = None,
date_format: Optional[str] = None,
doublequote: bool_t = True,
escapechar: Optional[str] = None,
decimal: Optional[str] = ".",
) -> Optional[str]:
r"""
Write object to a comma-separated values (csv) file.
.. versionchanged:: 0.24.0
The order of arguments for Series was changed.
Parameters
----------
path_or_buf : str or file handle, default None
File path or object, if None is provided the result is returned as
a string. If a file object is passed it should be opened with
`newline=''`, disabling universal newlines.
.. versionchanged:: 0.24.0
Was previously named "path" for Series.
sep : str, default ','
String of length 1. Field delimiter for the output file.
na_rep : str, default ''
Missing data representation.
float_format : str, default None
Format string for floating point numbers.
columns : sequence, optional
Columns to write.
header : bool or list of str, default True
Write out the column names. If a list of strings is given it is
assumed to be aliases for the column names.
.. versionchanged:: 0.24.0
Previously defaulted to False for Series.
index : bool, default True
Write row names (index).
index_label : str or sequence, or False, default None
Column label for index column(s) if desired. If None is given, and
`header` and `index` are True, then the index names are used. A
sequence should be given if the object uses MultiIndex. If
False do not print fields for index names. Use index_label=False
for easier importing in R.
mode : str
Python write mode, default 'w'.
encoding : str, optional
A string representing the encoding to use in the output file,
defaults to 'utf-8'.
compression : str or dict, default 'infer'
If str, represents compression mode. If dict, value at 'method' is
the compression mode. Compression mode may be any of the following
possible values: {'infer', 'gzip', 'bz2', 'zip', 'xz', None}. If
compression mode is 'infer' and `path_or_buf` is path-like, then
detect compression mode from the following extensions: '.gz',
'.bz2', '.zip' or '.xz'. (otherwise no compression). If dict given
and mode is 'zip' or inferred as 'zip', other entries passed as
additional compression options.
.. versionchanged:: 0.25.0
May now be a dict with key 'method' as compression mode
and other entries as additional compression options if
compression mode is 'zip'.
quoting : optional constant from csv module
Defaults to csv.QUOTE_MINIMAL. If you have set a `float_format`
then floats are converted to strings and thus csv.QUOTE_NONNUMERIC
will treat them as non-numeric.
quotechar : str, default '\"'
String of length 1. Character used to quote fields.
line_terminator : str, optional
The newline character or character sequence to use in the output
file. Defaults to `os.linesep`, which depends on the OS in which
this method is called (e.g. '\n' for Linux, '\r\n' for Windows).
.. versionchanged:: 0.24.0
chunksize : int or None
Rows to write at a time.
date_format : str, default None
Format string for datetime objects.
doublequote : bool, default True
Control quoting of `quotechar` inside a field.
escapechar : str, default None
String of length 1. Character used to escape `sep` and `quotechar`
when appropriate.
decimal : str, default '.'
Character recognized as decimal separator. E.g. use ',' for
European data.
Returns
-------
None or str
If path_or_buf is None, returns the resulting csv format as a
string. Otherwise returns None.
See Also
--------
read_csv : Load a CSV file into a DataFrame.
to_excel : Write DataFrame to an Excel file.
Examples
--------
>>> df = pd.DataFrame({'name': ['Raphael', 'Donatello'],
... 'mask': ['red', 'purple'],
... 'weapon': ['sai', 'bo staff']})
>>> df.to_csv(index=False)
'name,mask,weapon\nRaphael,red,sai\nDonatello,purple,bo staff\n'
Create 'out.zip' containing 'out.csv'.
>>> compression_opts = dict(method='zip',
... archive_name='out.csv') # doctest: +SKIP
>>> df.to_csv('out.zip', index=False,
... compression=compression_opts) # doctest: +SKIP
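An illustrative variation (file name is only an example) writing a
tab-separated file with a placeholder for missing values:
>>> df.to_csv('out.tsv', sep='\t', na_rep='NA', index=False)  # doctest: +SKIP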
"""
df = self if isinstance(self, ABCDataFrame) else self.to_frame()
from pandas.io.formats.csvs import CSVFormatter
formatter = CSVFormatter(
df,
path_or_buf,
line_terminator=line_terminator,
sep=sep,
encoding=encoding,
compression=compression,
quoting=quoting,
na_rep=na_rep,
float_format=float_format,
cols=columns,
header=header,
index=index,
index_label=index_label,
mode=mode,
chunksize=chunksize,
quotechar=quotechar,
date_format=date_format,
doublequote=doublequote,
escapechar=escapechar,
decimal=decimal,
)
formatter.save()
if path_or_buf is None:
return formatter.path_or_buf.getvalue()
return None
# ----------------------------------------------------------------------
# Fancy Indexing
@classmethod
def _create_indexer(cls, name, indexer):
"""Create an indexer like _name in the class."""
if getattr(cls, name, None) is None:
_indexer = functools.partial(indexer, name)
setattr(cls, name, property(_indexer, doc=indexer.__doc__))
# ----------------------------------------------------------------------
# Lookup Caching
def _set_as_cached(self, item, cacher):
"""Set the _cacher attribute on the calling object with a weakref to
cacher.
"""
self._cacher = (item, weakref.ref(cacher))
def _reset_cacher(self):
"""Reset the cacher."""
if hasattr(self, "_cacher"):
del self._cacher
def _maybe_cache_changed(self, item, value):
"""The object has called back to us saying maybe it has changed.
"""
self._data.set(item, value)
@property
def _is_cached(self):
"""Return boolean indicating if self is cached or not."""
return getattr(self, "_cacher", None) is not None
def _get_cacher(self):
"""return my cacher or None"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
cacher = cacher[1]()
return cacher
def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
"""
See if we need to update our parent cacher if clear, then clear our
cache.
Parameters
----------
clear : bool, default False
Clear the item cache.
verify_is_copy : bool, default True
Provide is_copy checks.
"""
cacher = getattr(self, "_cacher", None)
if cacher is not None:
ref = cacher[1]()
# we are trying to reference a dead referent, hence
# a copy
if ref is None:
del self._cacher
else:
try:
ref._maybe_cache_changed(cacher[0], self)
except Exception:
pass
if verify_is_copy:
self._check_setitem_copy(stacklevel=5, t="referant")
if clear:
self._clear_item_cache()
def _clear_item_cache(self):
self._item_cache.clear()
# ----------------------------------------------------------------------
# Indexing Methods
def take(self, indices, axis=0, is_copy=True, **kwargs):
"""
Return the elements in the given *positional* indices along an axis.
This means that we are not indexing according to actual values in
the index attribute of the object. We are indexing according to the
actual position of the element in the object.
Parameters
----------
indices : array-like
An array of ints indicating which positions to take.
axis : {0 or 'index', 1 or 'columns', None}, default 0
The axis on which to select elements. ``0`` means that we are
selecting rows, ``1`` means that we are selecting columns.
is_copy : bool, default True
Whether to return a copy of the original object or not.
**kwargs
For compatibility with :meth:`numpy.take`. Has no effect on the
output.
Returns
-------
taken : same type as caller
An array-like containing the elements taken from the object.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by labels.
DataFrame.iloc : Select a subset of a DataFrame by positions.
numpy.take : Take elements from an array along an axis.
Examples
--------
>>> df = pd.DataFrame([('falcon', 'bird', 389.0),
... ('parrot', 'bird', 24.0),
... ('lion', 'mammal', 80.5),
... ('monkey', 'mammal', np.nan)],
... columns=['name', 'class', 'max_speed'],
... index=[0, 2, 3, 1])
>>> df
name class max_speed
0 falcon bird 389.0
2 parrot bird 24.0
3 lion mammal 80.5
1 monkey mammal NaN
Take elements at positions 0 and 3 along the axis 0 (default).
Note how the actual indices selected (0 and 1) do not correspond to
our selected indices 0 and 3. That's because we are selecting the 0th
and 3rd rows, not rows whose indices equal 0 and 3.
>>> df.take([0, 3])
name class max_speed
0 falcon bird 389.0
1 monkey mammal NaN
Take elements at indices 1 and 2 along the axis 1 (column selection).
>>> df.take([1, 2], axis=1)
class max_speed
0 bird 389.0
2 bird 24.0
3 mammal 80.5
1 mammal NaN
We may take elements using negative integers for positive indices,
starting from the end of the object, just like with Python lists.
>>> df.take([-1, -2])
name class max_speed
1 monkey mammal NaN
3 lion mammal 80.5
"""
nv.validate_take(tuple(), kwargs)
self._consolidate_inplace()
new_data = self._data.take(
indices, axis=self._get_block_manager_axis(axis), verify=True
)
result = self._constructor(new_data).__finalize__(self)
# Maybe set copy if we didn't actually change the index.
if is_copy:
if not result._get_axis(axis).equals(self._get_axis(axis)):
result._set_is_copy(self)
return result
def xs(self, key, axis=0, level=None, drop_level=True):
"""
Return cross-section from the Series/DataFrame.
This method takes a `key` argument to select data at a particular
level of a MultiIndex.
Parameters
----------
key : label or tuple of label
Label contained in the index, or partially in a MultiIndex.
axis : {0 or 'index', 1 or 'columns'}, default 0
Axis to retrieve cross-section on.
level : object, defaults to first n levels (n=1 or len(key))
In case of a key partially contained in a MultiIndex, indicate
which levels are used. Levels can be referred by label or position.
drop_level : bool, default True
If False, returns object with same levels as self.
Returns
-------
Series or DataFrame
Cross-section from the original Series or DataFrame
corresponding to the selected index levels.
See Also
--------
DataFrame.loc : Access a group of rows and columns
by label(s) or a boolean array.
DataFrame.iloc : Purely integer-location based indexing
for selection by position.
Notes
-----
`xs` cannot be used to set values.
MultiIndex Slicers is a generic way to get/set values on
any level or levels.
It is a superset of `xs` functionality, see
:ref:`MultiIndex Slicers <advanced.mi_slicers>`.
Examples
--------
>>> d = {'num_legs': [4, 4, 2, 2],
... 'num_wings': [0, 0, 2, 2],
... 'class': ['mammal', 'mammal', 'mammal', 'bird'],
... 'animal': ['cat', 'dog', 'bat', 'penguin'],
... 'locomotion': ['walks', 'walks', 'flies', 'walks']}
>>> df = pd.DataFrame(data=d)
>>> df = df.set_index(['class', 'animal', 'locomotion'])
>>> df
num_legs num_wings
class animal locomotion
mammal cat walks 4 0
dog walks 4 0
bat flies 2 2
bird penguin walks 2 2
Get values at specified index
>>> df.xs('mammal')
num_legs num_wings
animal locomotion
cat walks 4 0
dog walks 4 0
bat flies 2 2
Get values at several indexes
>>> df.xs(('mammal', 'dog'))
num_legs num_wings
locomotion
walks 4 0
Get values at specified index and level
>>> df.xs('cat', level=1)
num_legs num_wings
class locomotion
mammal walks 4 0
Get values at several indexes and levels
>>> df.xs(('bird', 'walks'),
... level=[0, 'locomotion'])
num_legs num_wings
animal
penguin 2 2
Get values at specified column and axis
>>> df.xs('num_wings', axis=1)
class animal locomotion
mammal cat walks 0
dog walks 0
bat flies 2
bird penguin walks 2
Name: num_wings, dtype: int64
"""
axis = self._get_axis_number(axis)
labels = self._get_axis(axis)
if level is not None:
loc, new_ax = labels.get_loc_level(key, level=level, drop_level=drop_level)
# create the tuple of the indexer
indexer = [slice(None)] * self.ndim
indexer[axis] = loc
indexer = tuple(indexer)
result = self.iloc[indexer]
setattr(result, result._get_axis_name(axis), new_ax)
return result
if axis == 1:
return self[key]
self._consolidate_inplace()
index = self.index
if isinstance(index, MultiIndex):
loc, new_index = self.index.get_loc_level(key, drop_level=drop_level)
else:
loc = self.index.get_loc(key)
if isinstance(loc, np.ndarray):
if loc.dtype == np.bool_:
inds, = loc.nonzero()
return self.take(inds, axis=axis)
else:
return self.take(loc, axis=axis)
if not is_scalar(loc):
new_index = self.index[loc]
if is_scalar(loc):
new_values = self._data.fast_xs(loc)
# may need to box a datelike-scalar
#
# if we encounter an array-like and we only have 1 dim
# that means that there are lists/ndarrays inside the Series!
# so just return them (GH 6394)
if not is_list_like(new_values) or self.ndim == 1:
return com.maybe_box_datetimelike(new_values)
result = self._constructor_sliced(
new_values,
index=self.columns,
name=self.index[loc],
dtype=new_values.dtype,
)
else:
result = self.iloc[loc]
result.index = new_index
# this could be a view
# but only in a single-dtyped view sliceable case
result._set_is_copy(self, copy=not result._is_view)
return result
_xs = xs # type: Callable
def __getitem__(self, item):
raise AbstractMethodError(self)
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res
def _iget_item_cache(self, item):
"""Return the cached item, item represents a positional indexer."""
ax = self._info_axis
if ax.is_unique:
lower = self._get_item_cache(ax[item])
else:
lower = self.take(item, axis=self._info_axis_number)
return lower
def _box_item_values(self, key, values):
raise AbstractMethodError(self)
def _slice(self, slobj: slice, axis=0, kind=None):
"""
Construct a slice of this container.
kind parameter is maintained for compatibility with Series slicing.
"""
axis = self._get_block_manager_axis(axis)
result = self._constructor(self._data.get_slice(slobj, axis=axis))
result = result.__finalize__(self)
# this could be a view
# but only in a single-dtyped view sliceable case
is_copy = axis != 0 or result._is_view
result._set_is_copy(self, copy=is_copy)
return result
def _set_item(self, key, value):
self._data.set(key, value)
self._clear_item_cache()
def _set_is_copy(self, ref=None, copy=True):
if not copy:
self._is_copy = None
else:
if ref is not None:
self._is_copy = weakref.ref(ref)
else:
self._is_copy = None
def _check_is_chained_assignment_possible(self):
"""
Check if we are a view, have a cacher, and are of mixed type.
If so, then force a setitem_copy check.
Should be called just prior to setting a value.
Will return a boolean if we are a view and are cached, but single-dtyped,
meaning that the cacher should be updated following the setting.
"""
if self._is_view and self._is_cached:
ref = self._get_cacher()
if ref is not None and ref._is_mixed_type:
self._check_setitem_copy(stacklevel=4, t="referant", force=True)
return True
elif self._is_copy:
self._check_setitem_copy(stacklevel=4, t="referant")
return False
def _check_setitem_copy(self, stacklevel=4, t="setting", force=False):
"""
Parameters
----------
stacklevel : int, default 4
the level to show of the stack when the error is output
t : str, the type of setting error
force : bool, default False
If True, then force showing an error.
validate if we are doing a setitem on a chained copy.
If you call this function, be sure to set the stacklevel such that the
user will see the error *at the level of setting*
It is technically possible to figure out that we are setting on
a copy even WITH a multi-dtyped pandas object. In other words, some
blocks may be views while others are not. Currently _is_view will ALWAYS
return False for multi-blocks to avoid having to handle this case.
df = DataFrame(np.arange(0,9), columns=['count'])
df['group'] = 'b'
# This technically need not raise SettingWithCopy if both are views
# (which is not generally guaranteed, but is usually True). However,
# this is in general not a good practice and we recommend using .loc.
df.iloc[0:5]['group'] = 'a'
"""
# return early if the check is not needed
if not (force or self._is_copy):
return
value = config.get_option("mode.chained_assignment")
if value is None:
return
# see if the copy is not actually referred; if so, then dissolve
# the copy weakref
if self._is_copy is not None and not isinstance(self._is_copy, str):
r = self._is_copy()
if not gc.get_referents(r) or r.shape == self.shape:
self._is_copy = None
return
# a custom message
if isinstance(self._is_copy, str):
t = self._is_copy
elif t == "referant":
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame\n\n"
"See the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
else:
t = (
"\n"
"A value is trying to be set on a copy of a slice from a "
"DataFrame.\n"
"Try using .loc[row_indexer,col_indexer] = value "
"instead\n\nSee the caveats in the documentation: "
"http://pandas.pydata.org/pandas-docs/stable/user_guide/"
"indexing.html#returning-a-view-versus-a-copy"
)
if value == "raise":
raise com.SettingWithCopyError(t)
elif value == "warn":
warnings.warn(t, com.SettingWithCopyWarning, stacklevel=stacklevel)
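# Illustrative sketch (not executed) of the chained-assignment situation this
# check guards against, versus the recommended explicit indexer:
#
#   df = pd.DataFrame({"count": range(9)})
#   df["group"] = "b"
#   # chained form: may warn or raise, depending on the
#   # "mode.chained_assignment" option read above
#   df.iloc[0:5]["group"] = "a"
#   # recommended single-indexer form (note .loc slices are label-inclusive)
#   df.loc[0:4, "group"] = "a"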
def __delitem__(self, key):
"""
Delete item
"""
deleted = False
maybe_shortcut = False
if self.ndim == 2 and isinstance(self.columns, MultiIndex):
try:
maybe_shortcut = key not in self.columns._engine
except TypeError:
pass
if maybe_shortcut:
# Allow shorthand to delete all columns whose first len(key)
# elements match key:
if not isinstance(key, tuple):
key = (key,)
for col in self.columns:
if isinstance(col, tuple) and col[: len(key)] == key:
del self[col]
deleted = True
if not deleted:
# If the above loop ran and didn't delete anything because
# there was no match, this call should raise the appropriate
# exception:
self._data.delete(key)
# delete from the caches
try:
del self._item_cache[key]
except KeyError:
pass
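# Illustrative sketch (assumed usage) of the MultiIndex-column shorthand
# handled above:
#
#   cols = pd.MultiIndex.from_product([["a", "b"], ["x", "y"]])
#   df = pd.DataFrame(np.ones((2, 4)), columns=cols)
#   del df["a"]          # drops both ("a", "x") and ("a", "y")
#   del df[("b", "x")]   # drops a single column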
# ----------------------------------------------------------------------
# Unsorted
def get(self, key, default=None):
"""
Get item from object for given key (ex: DataFrame column).
Returns default value if not found.
Parameters
----------
key : object
Returns
-------
value : same type as items contained in object
"""
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
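# Usage sketch (values are only an example): a missing key falls back to
# ``default`` rather than raising.
#
#   df = pd.DataFrame({"a": [1, 2]})
#   df.get("a")       # the "a" column as a Series
#   df.get("z", 0)    # -> 0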
@property
def _is_view(self):
"""Return boolean indicating if self is view of another array """
return self._data.is_view
def reindex_like(self, other, method=None, copy=True, limit=None, tolerance=None):
"""
Return an object with matching indices as other object.
Conform the object to the same index on all axes. Optional
filling logic, placing NaN in locations having no value
in the previous index. A new object is produced unless the
new index is equivalent to the current one and copy=False.
Parameters
----------
other : Object of the same data type
Its row and column indices are used to define the new indices
of this object.
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
limit : int, default None
Maximum number of consecutive labels to fill for inexact matches.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
Series or DataFrame
Same type as caller, but with changed indices on each axis.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex : Change to new indices or expand indices.
Notes
-----
Same as calling
``.reindex(index=other.index, columns=other.columns,...)``.
Examples
--------
>>> df1 = pd.DataFrame([[24.3, 75.7, 'high'],
... [31, 87.8, 'high'],
... [22, 71.6, 'medium'],
... [35, 95, 'medium']],
... columns=['temp_celsius', 'temp_fahrenheit', 'windspeed'],
... index=pd.date_range(start='2014-02-12',
... end='2014-02-15', freq='D'))
>>> df1
temp_celsius temp_fahrenheit windspeed
2014-02-12 24.3 75.7 high
2014-02-13 31.0 87.8 high
2014-02-14 22.0 71.6 medium
2014-02-15 35.0 95.0 medium
>>> df2 = pd.DataFrame([[28, 'low'],
... [30, 'low'],
... [35.1, 'medium']],
... columns=['temp_celsius', 'windspeed'],
... index=pd.DatetimeIndex(['2014-02-12', '2014-02-13',
... '2014-02-15']))
>>> df2
temp_celsius windspeed
2014-02-12 28.0 low
2014-02-13 30.0 low
2014-02-15 35.1 medium
>>> df2.reindex_like(df1)
temp_celsius temp_fahrenheit windspeed
2014-02-12 28.0 NaN low
2014-02-13 30.0 NaN low
2014-02-14 NaN NaN NaN
2014-02-15 35.1 NaN medium
"""
d = other._construct_axes_dict(
axes=self._AXIS_ORDERS,
method=method,
copy=copy,
limit=limit,
tolerance=tolerance,
)
return self.reindex(**d)
def drop(
self,
labels=None,
axis=0,
index=None,
columns=None,
level=None,
inplace=False,
errors="raise",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if labels is not None:
if index is not None or columns is not None:
raise ValueError("Cannot specify both 'labels' and 'index'/'columns'")
axis_name = self._get_axis_name(axis)
axes = {axis_name: labels}
elif index is not None or columns is not None:
axes, _ = self._construct_axes_from_arguments((index, columns), {})
else:
raise ValueError(
"Need to specify at least one of 'labels', 'index' or 'columns'"
)
obj = self
for axis, labels in axes.items():
if labels is not None:
obj = obj._drop_axis(labels, axis, level=level, errors=errors)
if inplace:
self._update_inplace(obj)
else:
return obj
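# The two equivalent calling conventions dispatched above (illustrative):
#
#   df.drop(columns=["B"])     # via the index/columns keywords
#   df.drop(["B"], axis=1)     # via labels plus an explicit axis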
def _drop_axis(self, labels, axis, level=None, errors="raise"):
"""
Drop labels from specified axis. Used in the ``drop`` method
internally.
Parameters
----------
labels : single label or list-like
axis : int or axis name
level : int or level name, default None
For MultiIndex
errors : {'ignore', 'raise'}, default 'raise'
If 'ignore', suppress error and existing labels are dropped.
"""
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
axis = self._get_axis(axis)
if axis.is_unique:
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
new_axis = axis.drop(labels, level=level, errors=errors)
else:
new_axis = axis.drop(labels, errors=errors)
result = self.reindex(**{axis_name: new_axis})
# Case for non-unique axis
else:
labels = ensure_object(com.index_labels_to_array(labels))
if level is not None:
if not isinstance(axis, MultiIndex):
raise AssertionError("axis must be a MultiIndex")
indexer = ~axis.get_level_values(level).isin(labels)
# GH 18561 MultiIndex.drop should raise if label is absent
if errors == "raise" and indexer.all():
raise KeyError("{} not found in axis".format(labels))
else:
indexer = ~axis.isin(labels)
# Check if label doesn't exist along axis
labels_missing = (axis.get_indexer_for(labels) == -1).any()
if errors == "raise" and labels_missing:
raise KeyError("{} not found in axis".format(labels))
slicer = [slice(None)] * self.ndim
slicer[self._get_axis_number(axis_name)] = indexer
result = self.loc[tuple(slicer)]
return result
def _update_inplace(self, result, verify_is_copy=True):
"""
Replace self internals with result.
Parameters
----------
verify_is_copy : bool, default True
Provide is_copy checks.
"""
# NOTE: This does *not* call __finalize__ and that's an explicit
# decision that we may revisit in the future.
self._reset_cache()
self._clear_item_cache()
self._data = getattr(result, "_data", result)
self._maybe_update_cacher(verify_is_copy=verify_is_copy)
def add_prefix(self, prefix):
"""
Prefix labels with string `prefix`.
For Series, the row labels are prefixed.
For DataFrame, the column labels are prefixed.
Parameters
----------
prefix : str
The string to add before each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_suffix: Suffix row labels with string `suffix`.
DataFrame.add_suffix: Suffix column labels with string `suffix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_prefix('item_')
item_0 1
item_1 2
item_2 3
item_3 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_prefix('col_')
col_A col_B
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{prefix}{}".format, prefix=prefix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
def add_suffix(self, suffix):
"""
Suffix labels with string `suffix`.
For Series, the row labels are suffixed.
For DataFrame, the column labels are suffixed.
Parameters
----------
suffix : str
The string to add after each label.
Returns
-------
Series or DataFrame
New Series or DataFrame with updated labels.
See Also
--------
Series.add_prefix: Prefix row labels with string `prefix`.
DataFrame.add_prefix: Prefix column labels with string `prefix`.
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.add_suffix('_item')
0_item 1
1_item 2
2_item 3
3_item 4
dtype: int64
>>> df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [3, 4, 5, 6]})
>>> df
A B
0 1 3
1 2 4
2 3 5
3 4 6
>>> df.add_suffix('_col')
A_col B_col
0 1 3
1 2 4
2 3 5
3 4 6
"""
f = functools.partial("{}{suffix}".format, suffix=suffix)
mapper = {self._info_axis_name: f}
return self.rename(**mapper)
def sort_values(
self,
by=None,
axis=0,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
):
"""
Sort by the values along either axis.
Parameters
----------%(optional_by)s
axis : %(axes_single_arg)s, default 0
Axis to be sorted.
ascending : bool or list of bool, default True
Sort ascending vs. descending. Specify list for multiple sort
orders. If this is a list of bools, must match the length of
the by.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the
end.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted values if inplace=False, None otherwise.
Examples
--------
>>> df = pd.DataFrame({
... 'col1': ['A', 'A', 'B', np.nan, 'D', 'C'],
... 'col2': [2, 1, 9, 8, 7, 4],
... 'col3': [0, 1, 9, 4, 2, 3],
... })
>>> df
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
3 NaN 8 4
4 D 7 2
5 C 4 3
Sort by col1
>>> df.sort_values(by=['col1'])
col1 col2 col3
0 A 2 0
1 A 1 1
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort by multiple columns
>>> df.sort_values(by=['col1', 'col2'])
col1 col2 col3
1 A 1 1
0 A 2 0
2 B 9 9
5 C 4 3
4 D 7 2
3 NaN 8 4
Sort Descending
>>> df.sort_values(by='col1', ascending=False)
col1 col2 col3
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
3 NaN 8 4
Putting NAs first
>>> df.sort_values(by='col1', ascending=False, na_position='first')
col1 col2 col3
3 NaN 8 4
4 D 7 2
5 C 4 3
2 B 9 9
0 A 2 0
1 A 1 1
"""
raise AbstractMethodError(self)
def sort_index(
self,
axis=0,
level=None,
ascending=True,
inplace=False,
kind="quicksort",
na_position="last",
sort_remaining=True,
):
"""
Sort object by labels (along an axis).
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The axis along which to sort. The value 0 identifies the rows,
and 1 identifies the columns.
level : int or level name or list of ints or list of level names
If not None, sort on values in specified index level(s).
ascending : bool, default True
Sort ascending vs. descending.
inplace : bool, default False
If True, perform operation in-place.
kind : {'quicksort', 'mergesort', 'heapsort'}, default 'quicksort'
Choice of sorting algorithm. See also ndarray.np.sort for more
information. `mergesort` is the only stable algorithm. For
DataFrames, this option is only applied when sorting on a single
column or label.
na_position : {'first', 'last'}, default 'last'
Puts NaNs at the beginning if `first`; `last` puts NaNs at the end.
Not implemented for MultiIndex.
sort_remaining : bool, default True
If True and sorting by level and index is multilevel, sort by other
levels too (in order) after sorting by specified level.
Returns
-------
sorted_obj : DataFrame or None
DataFrame with sorted index if inplace=False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = self._get_axis_number(axis)
axis_name = self._get_axis_name(axis)
labels = self._get_axis(axis)
if level is not None:
raise NotImplementedError("level is not implemented")
if inplace:
raise NotImplementedError("inplace is not implemented")
sort_index = labels.argsort()
if not ascending:
sort_index = sort_index[::-1]
new_axis = labels.take(sort_index)
return self.reindex(**{axis_name: new_axis})
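# Minimal usage sketch for this base implementation (``level`` and
# ``inplace`` are handled by the subclass overrides):
#
#   df = pd.DataFrame({"A": [1, 2, 3]}, index=["c", "a", "b"])
#   df.sort_index()                  # index ordered a, b, c
#   df.sort_index(ascending=False)   # index ordered c, b, a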
def reindex(self, *args, **kwargs):
"""
Conform %(klass)s to new index with optional filling logic, placing
NA/NaN in locations having no value in the previous index. A new object
is produced unless the new index is equivalent to the current one and
``copy=False``.
Parameters
----------
%(optional_labels)s
%(axes)s : array-like, optional
New labels / index to conform to, should be specified using
keywords. Preferably an Index object to avoid duplicating data
%(optional_axis)s
method : {None, 'backfill'/'bfill', 'pad'/'ffill', 'nearest'}
Method to use for filling holes in reindexed DataFrame.
Please note: this is only applicable to DataFrames/Series with a
monotonically increasing/decreasing index.
* None (default): don't fill gaps
* pad / ffill: propagate last valid observation forward to next
valid
* backfill / bfill: use next valid observation to fill gap
* nearest: use nearest valid observations to fill gap
copy : bool, default True
Return a new object, even if the passed indexes are the same.
level : int or name
Broadcast across a level, matching Index values on the
passed MultiIndex level.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value.
limit : int, default None
Maximum number of consecutive elements to forward or backward fill.
tolerance : optional
Maximum distance between original and new labels for inexact
matches. The values of the index at the matching locations must
satisfy the equation ``abs(index[indexer] - target) <= tolerance``.
Tolerance may be a scalar value, which applies the same tolerance
to all values, or list-like, which applies variable tolerance per
element. List-like includes list, tuple, array, Series, and must be
the same size as the index and its dtype must exactly match the
index's type.
.. versionadded:: 0.21.0 (list-like tolerance)
Returns
-------
%(klass)s with changed index.
See Also
--------
DataFrame.set_index : Set row labels.
DataFrame.reset_index : Remove row labels or move them to new columns.
DataFrame.reindex_like : Change to same indices as other DataFrame.
Examples
--------
``DataFrame.reindex`` supports two calling conventions
* ``(index=index_labels, columns=column_labels, ...)``
* ``(labels, axis={'index', 'columns'}, ...)``
We *highly* recommend using keyword arguments to clarify your
intent.
Create a dataframe with some fictional data.
>>> index = ['Firefox', 'Chrome', 'Safari', 'IE10', 'Konqueror']
>>> df = pd.DataFrame({
... 'http_status': [200,200,404,404,301],
... 'response_time': [0.04, 0.02, 0.07, 0.08, 1.0]},
... index=index)
>>> df
http_status response_time
Firefox 200 0.04
Chrome 200 0.02
Safari 404 0.07
IE10 404 0.08
Konqueror 301 1.00
Create a new index and reindex the dataframe. By default
values in the new index that do not have corresponding
records in the dataframe are assigned ``NaN``.
>>> new_index= ['Safari', 'Iceweasel', 'Comodo Dragon', 'IE10',
... 'Chrome']
>>> df.reindex(new_index)
http_status response_time
Safari 404.0 0.07
Iceweasel NaN NaN
Comodo Dragon NaN NaN
IE10 404.0 0.08
Chrome 200.0 0.02
We can fill in the missing values by passing a value to
the keyword ``fill_value``. Because the index is not monotonically
increasing or decreasing, we cannot use arguments to the keyword
``method`` to fill the ``NaN`` values.
>>> df.reindex(new_index, fill_value=0)
http_status response_time
Safari 404 0.07
Iceweasel 0 0.00
Comodo Dragon 0 0.00
IE10 404 0.08
Chrome 200 0.02
>>> df.reindex(new_index, fill_value='missing')
http_status response_time
Safari 404 0.07
Iceweasel missing missing
Comodo Dragon missing missing
IE10 404 0.08
Chrome 200 0.02
We can also reindex the columns.
>>> df.reindex(columns=['http_status', 'user_agent'])
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
Or we can use "axis-style" keyword arguments
>>> df.reindex(['http_status', 'user_agent'], axis="columns")
http_status user_agent
Firefox 200 NaN
Chrome 200 NaN
Safari 404 NaN
IE10 404 NaN
Konqueror 301 NaN
To further illustrate the filling functionality in
``reindex``, we will create a dataframe with a
monotonically increasing index (for example, a sequence
of dates).
>>> date_index = pd.date_range('1/1/2010', periods=6, freq='D')
>>> df2 = pd.DataFrame({"prices": [100, 101, np.nan, 100, 89, 88]},
... index=date_index)
>>> df2
prices
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
Suppose we decide to expand the dataframe to cover a wider
date range.
>>> date_index2 = pd.date_range('12/29/2009', periods=10, freq='D')
>>> df2.reindex(date_index2)
prices
2009-12-29 NaN
2009-12-30 NaN
2009-12-31 NaN
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
The index entries that did not have a value in the original data frame
(for example, '2009-12-29') are by default filled with ``NaN``.
If desired, we can fill in the missing values using one of several
options.
For example, to back-propagate the last valid value to fill the ``NaN``
values, pass ``bfill`` as an argument to the ``method`` keyword.
>>> df2.reindex(date_index2, method='bfill')
prices
2009-12-29 100.0
2009-12-30 100.0
2009-12-31 100.0
2010-01-01 100.0
2010-01-02 101.0
2010-01-03 NaN
2010-01-04 100.0
2010-01-05 89.0
2010-01-06 88.0
2010-01-07 NaN
Please note that the ``NaN`` value present in the original dataframe
(at index value 2010-01-03) will not be filled by any of the
value propagation schemes. This is because filling while reindexing
does not look at dataframe values, but only compares the original and
desired indexes. If you do want to fill in the ``NaN`` values present
in the original dataframe, use the ``fillna()`` method.
See the :ref:`user guide <basics.reindexing>` for more.
"""
# TODO: Decide if we care about having different examples for different
# kinds
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
method = missing.clean_reindex_fill_method(kwargs.pop("method", None))
level = kwargs.pop("level", None)
copy = kwargs.pop("copy", True)
limit = kwargs.pop("limit", None)
tolerance = kwargs.pop("tolerance", None)
fill_value = kwargs.pop("fill_value", None)
# Series.reindex doesn't use / need the axis kwarg
# We pop and ignore it here, to make writing Series/Frame generic code
# easier
kwargs.pop("axis", None)
if kwargs:
raise TypeError(
"reindex() got an unexpected keyword "
'argument "{0}"'.format(list(kwargs.keys())[0])
)
self._consolidate_inplace()
# if all axes that are requested to reindex are equal, then only copy
# if indicated must have index names equal here as well as values
if all(
self._get_axis(axis).identical(ax)
for axis, ax in axes.items()
if ax is not None
):
if copy:
return self.copy()
return self
# check if we are a multi reindex
if self._needs_reindex_multi(axes, method, level):
try:
return self._reindex_multi(axes, copy, fill_value)
except Exception:
pass
# perform the reindex on the axes
return self._reindex_axes(
axes, level, limit, tolerance, method, fill_value, copy
).__finalize__(self)
def _reindex_axes(self, axes, level, limit, tolerance, method, fill_value, copy):
"""Perform the reindex for all the axes."""
obj = self
for a in self._AXIS_ORDERS:
labels = axes[a]
if labels is None:
continue
ax = self._get_axis(a)
new_index, indexer = ax.reindex(
labels, level=level, limit=limit, tolerance=tolerance, method=method
)
axis = self._get_axis_number(a)
obj = obj._reindex_with_indexers(
{axis: [new_index, indexer]},
fill_value=fill_value,
copy=copy,
allow_dups=False,
)
return obj
def _needs_reindex_multi(self, axes, method, level):
"""Check if we do need a multi reindex."""
return (
(com.count_not_none(*axes.values()) == self._AXIS_LEN)
and method is None
and level is None
and not self._is_mixed_type
)
def _reindex_multi(self, axes, copy, fill_value):
return NotImplemented
def _reindex_with_indexers(
self, reindexers, fill_value=None, copy=False, allow_dups=False
):
"""allow_dups indicates an internal call here """
# reindex doing multiple operations on different axes if indicated
new_data = self._data
for axis in sorted(reindexers.keys()):
index, indexer = reindexers[axis]
baxis = self._get_block_manager_axis(axis)
if index is None:
continue
index = ensure_index(index)
if indexer is not None:
indexer = ensure_int64(indexer)
# TODO: speed up on homogeneous DataFrame objects
new_data = new_data.reindex_indexer(
index,
indexer,
axis=baxis,
fill_value=fill_value,
allow_dups=allow_dups,
copy=copy,
)
if copy and new_data is self._data:
new_data = new_data.copy()
return self._constructor(new_data).__finalize__(self)
def filter(self, items=None, like=None, regex=None, axis=None):
"""
Subset rows or columns of dataframe according to labels in
the specified index.
Note that this routine does not filter a dataframe on its
contents. The filter is applied to the labels of the index.
Parameters
----------
items : list-like
Keep labels from axis which are in items.
like : str
Keep labels from axis for which "like in label == True".
regex : str (regular expression)
Keep labels from axis for which re.search(regex, label) == True.
axis : int or string axis name
The axis to filter on. By default this is the info axis,
'index' for Series, 'columns' for DataFrame.
Returns
-------
same type as input object
See Also
--------
DataFrame.loc
Notes
-----
The ``items``, ``like``, and ``regex`` parameters are
enforced to be mutually exclusive.
``axis`` defaults to the info axis that is used when indexing
with ``[]``.
Examples
--------
>>> df = pd.DataFrame(np.array(([1, 2, 3], [4, 5, 6])),
... index=['mouse', 'rabbit'],
... columns=['one', 'two', 'three'])
>>> # select columns by name
>>> df.filter(items=['one', 'three'])
one three
mouse 1 3
rabbit 4 6
>>> # select columns by regular expression
>>> df.filter(regex='e$', axis=1)
one three
mouse 1 3
rabbit 4 6
>>> # select rows containing 'bbi'
>>> df.filter(like='bbi', axis=0)
one two three
rabbit 4 5 6
"""
nkw = com.count_not_none(items, like, regex)
if nkw > 1:
raise TypeError(
"Keyword arguments `items`, `like`, or `regex` "
"are mutually exclusive"
)
if axis is None:
axis = self._info_axis_name
labels = self._get_axis(axis)
if items is not None:
name = self._get_axis_name(axis)
return self.reindex(**{name: [r for r in items if r in labels]})
elif like:
def f(x):
return like in ensure_str(x)
values = labels.map(f)
return self.loc(axis=axis)[values]
elif regex:
def f(x):
return matcher.search(ensure_str(x)) is not None
matcher = re.compile(regex)
values = labels.map(f)
return self.loc(axis=axis)[values]
else:
raise TypeError("Must pass either `items`, `like`, or `regex`")
def head(self, n=5):
"""
Return the first `n` rows.
This function returns the first `n` rows for the object based
on position. It is useful for quickly testing if your object
has the right type of data in it.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
obj_head : same type as caller
The first `n` rows of the caller object.
See Also
--------
DataFrame.tail: Returns the last `n` rows.
Examples
--------
>>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the first 5 lines
>>> df.head()
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
Viewing the first `n` lines (three in this case)
>>> df.head(3)
animal
0 alligator
1 bee
2 falcon
"""
return self.iloc[:n]
def tail(self, n=5):
"""
Return the last `n` rows.
This function returns last `n` rows from the object based on
position. It is useful for quickly verifying data, for example,
after sorting or appending rows.
Parameters
----------
n : int, default 5
Number of rows to select.
Returns
-------
type of caller
The last `n` rows of the caller object.
See Also
--------
DataFrame.head : The first `n` rows of the caller object.
Examples
--------
>>> df = pd.DataFrame({'animal':['alligator', 'bee', 'falcon', 'lion',
... 'monkey', 'parrot', 'shark', 'whale', 'zebra']})
>>> df
animal
0 alligator
1 bee
2 falcon
3 lion
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last 5 lines
>>> df.tail()
animal
4 monkey
5 parrot
6 shark
7 whale
8 zebra
Viewing the last `n` lines (three in this case)
>>> df.tail(3)
animal
6 shark
7 whale
8 zebra
"""
if n == 0:
return self.iloc[0:0]
return self.iloc[-n:]
def sample(
self,
n=None,
frac=None,
replace=False,
weights=None,
random_state=None,
axis=None,
):
"""
Return a random sample of items from an axis of object.
You can use `random_state` for reproducibility.
Parameters
----------
n : int, optional
Number of items from axis to return. Cannot be used with `frac`.
Default = 1 if `frac` = None.
frac : float, optional
Fraction of axis items to return. Cannot be used with `n`.
replace : bool, default False
Allow or disallow sampling of the same row more than once.
weights : str or ndarray-like, optional
Default 'None' results in equal probability weighting.
If passed a Series, will align with target object on index. Index
values in weights not found in sampled object will be ignored and
index values in sampled object not in weights will be assigned
weights of zero.
If called on a DataFrame, will accept the name of a column
when axis = 0.
Unless weights are a Series, weights must be same length as axis
being sampled.
If weights do not sum to 1, they will be normalized to sum to 1.
Missing values in the weights column will be treated as zero.
Infinite values not allowed.
random_state : int or numpy.random.RandomState, optional
Seed for the random number generator (if int), or numpy RandomState
object.
axis : int or string, optional
Axis to sample. Accepts axis number or name. Default is stat axis
for given data type (0 for Series and DataFrames).
Returns
-------
Series or DataFrame
A new object of same type as caller containing `n` items randomly
sampled from the caller object.
See Also
--------
numpy.random.choice: Generates a random sample from a given 1-D numpy
array.
Examples
--------
>>> df = pd.DataFrame({'num_legs': [2, 4, 8, 0],
... 'num_wings': [2, 0, 0, 0],
... 'num_specimen_seen': [10, 2, 1, 8]},
... index=['falcon', 'dog', 'spider', 'fish'])
>>> df
num_legs num_wings num_specimen_seen
falcon 2 2 10
dog 4 0 2
spider 8 0 1
fish 0 0 8
Extract 3 random elements from the ``Series`` ``df['num_legs']``:
Note that we use `random_state` to ensure the reproducibility of
the examples.
>>> df['num_legs'].sample(n=3, random_state=1)
fish 0
spider 8
falcon 2
Name: num_legs, dtype: int64
A random 50% sample of the ``DataFrame`` with replacement:
>>> df.sample(frac=0.5, replace=True, random_state=1)
num_legs num_wings num_specimen_seen
dog 4 0 2
fish 0 0 8
Using a DataFrame column as weights. Rows with larger value in the
`num_specimen_seen` column are more likely to be sampled.
>>> df.sample(n=2, weights='num_specimen_seen', random_state=1)
num_legs num_wings num_specimen_seen
falcon 2 2 10
fish 0 0 8
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
axis_length = self.shape[axis]
# Process random_state argument
rs = com.random_state(random_state)
# Check weights for compliance
if weights is not None:
# If a series, align with frame
if isinstance(weights, ABCSeries):
weights = weights.reindex(self.axes[axis])
# Strings acceptable if a dataframe and axis = 0
if isinstance(weights, str):
if isinstance(self, ABCDataFrame):
if axis == 0:
try:
weights = self[weights]
except KeyError:
raise KeyError(
"String passed to weights not a valid column"
)
else:
raise ValueError(
"Strings can only be passed to "
"weights when sampling from rows on "
"a DataFrame"
)
else:
raise ValueError(
"Strings cannot be passed as weights "
"when sampling from a Series."
)
weights = pd.Series(weights, dtype="float64")
if len(weights) != axis_length:
raise ValueError(
"Weights and axis to be sampled must be of same length"
)
if (weights == np.inf).any() or (weights == -np.inf).any():
raise ValueError("weight vector may not include `inf` values")
if (weights < 0).any():
raise ValueError("weight vector many not include negative values")
# If has nan, set to zero.
weights = weights.fillna(0)
# Renormalize if don't sum to 1
if weights.sum() != 1:
if weights.sum() != 0:
weights = weights / weights.sum()
else:
raise ValueError("Invalid weights: weights sum to zero")
weights = weights.values
# If no frac or n, default to n=1.
if n is None and frac is None:
n = 1
elif n is not None and frac is None and n % 1 != 0:
raise ValueError("Only integers accepted as `n` values")
elif n is None and frac is not None:
n = int(round(frac * axis_length))
elif n is not None and frac is not None:
raise ValueError("Please enter a value for `frac` OR `n`, not both")
# Check for negative sizes
if n < 0:
raise ValueError(
"A negative number of rows requested. Please provide positive value."
)
locs = rs.choice(axis_length, size=n, replace=replace, p=weights)
return self.take(locs, axis=axis, is_copy=False)
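# Sketch of the weights handling above (values are only an example): a
# list-like that does not sum to 1 is renormalized before being passed to
# RandomState.choice.
#
#   df = pd.DataFrame({"x": range(4)})
#   df.sample(n=2, weights=[8, 1, 1, 0], random_state=1)
#   # weights are renormalized to [0.8, 0.1, 0.1, 0.0]; row 3 is never drawn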
_shared_docs[
"pipe"
] = r"""
Apply func(self, \*args, \*\*kwargs).
Parameters
----------
func : function
function to apply to the %(klass)s.
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the %(klass)s.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : mapping, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
See Also
--------
DataFrame.apply
DataFrame.applymap
Series.map
Notes
-----
Use ``.pipe`` when chaining together functions that expect
Series, DataFrames or GroupBy objects. Instead of writing
>>> f(g(h(df), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (df.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
"""
@Appender(_shared_docs["pipe"] % _shared_doc_kwargs)
def pipe(self, func, *args, **kwargs):
return com.pipe(self, func, *args, **kwargs)
_shared_docs["aggregate"] = dedent(
"""
Aggregate using one or more operations over the specified axis.
%(versionadded)s
Parameters
----------
func : function, str, list or dict
Function to use for aggregating the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
- list of functions and/or function names, e.g. ``[np.sum, 'mean']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
scalar, Series or DataFrame
The return can be:
* scalar : when Series.agg is called with single function
* Series : when DataFrame.agg is called with a single function
* DataFrame : when DataFrame.agg is called with several functions
Return scalar, Series or DataFrame.
%(see_also)s
Notes
-----
`agg` is an alias for `aggregate`. Use the alias.
A passed user-defined-function will be passed a Series for evaluation.
%(examples)s"""
)
_shared_docs[
"transform"
] = """
Call ``func`` on self producing a %(klass)s with transformed values
and that has the same axis length as self.
.. versionadded:: 0.20.0
Parameters
----------
func : function, str, list or dict
Function to use for transforming the data. If a function, must either
work when passed a %(klass)s or when passed to %(klass)s.apply.
Accepted combinations are:
- function
- string function name
    - list of functions and/or function names, e.g. ``[np.exp, 'sqrt']``
- dict of axis labels -> functions, function names or list of such.
%(axis)s
*args
Positional arguments to pass to `func`.
**kwargs
Keyword arguments to pass to `func`.
Returns
-------
%(klass)s
A %(klass)s that must have the same length as self.
Raises
------
ValueError : If the returned %(klass)s has a different length than self.
See Also
--------
%(klass)s.agg : Only perform aggregating type operations.
%(klass)s.apply : Invoke function on a %(klass)s.
Examples
--------
>>> df = pd.DataFrame({'A': range(3), 'B': range(1, 4)})
>>> df
A B
0 0 1
1 1 2
2 2 3
>>> df.transform(lambda x: x + 1)
A B
0 1 2
1 2 3
2 3 4
Even though the resulting %(klass)s must have the same length as the
input %(klass)s, it is possible to provide several input functions:
>>> s = pd.Series(range(3))
>>> s
0 0
1 1
2 2
dtype: int64
>>> s.transform([np.sqrt, np.exp])
sqrt exp
0 0.000000 1.000000
1 1.000000 2.718282
2 1.414214 7.389056
"""
# ----------------------------------------------------------------------
# Attribute access
def __finalize__(self, other, method=None, **kwargs):
"""
Propagate metadata from other to self.
Parameters
----------
other : the object from which to get the attributes that we are going
to propagate
method : optional, a passed method name ; possibly to take different
types of propagation actions based on this
"""
if isinstance(other, NDFrame):
for name in self._metadata:
object.__setattr__(self, name, getattr(other, name, None))
return self
def __getattr__(self, name):
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
# Note: obj.x will always call obj.__getattribute__('x') prior to
# calling obj.__getattr__('x').
if (
name in self._internal_names_set
or name in self._metadata
or name in self._accessors
):
return object.__getattribute__(self, name)
else:
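            # Fall back to item access: lets columns (or index labels) whose
            # names are valid identifiers be read as attributes, e.g. ``df.col``.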
if self._info_axis._can_hold_identifiers_and_holds_name(name):
return self[name]
return object.__getattribute__(self, name)
def __setattr__(self, name, value):
"""After regular attribute access, try setting the name
This allows simpler access to columns for interactive use.
"""
# first try regular attribute access via __getattribute__, so that
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
except AttributeError:
pass
# if this fails, go on to more involved attribute setting
# (note that this matches __getattr__, above).
if name in self._internal_names_set:
object.__setattr__(self, name, value)
elif name in self._metadata:
object.__setattr__(self, name, value)
else:
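            # Prefer updating an existing attribute or column; creating a brand
            # new attribute on a DataFrame with a list-like value is likely an
            # attempted column assignment, so warn before falling back to setattr.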
try:
existing = getattr(self, name)
if isinstance(existing, Index):
object.__setattr__(self, name, value)
elif name in self._info_axis:
self[name] = value
else:
object.__setattr__(self, name, value)
except (AttributeError, TypeError):
if isinstance(self, ABCDataFrame) and (is_list_like(value)):
warnings.warn(
"Pandas doesn't allow columns to be "
"created via a new attribute name - see "
"https://pandas.pydata.org/pandas-docs/"
"stable/indexing.html#attribute-access",
stacklevel=2,
)
object.__setattr__(self, name, value)
def _dir_additions(self):
""" add the string-like attributes from the info_axis.
        If info_axis is a MultiIndex, its first level values are used.
"""
additions = {
c
for c in self._info_axis.unique(level=0)[:100]
if isinstance(c, str) and c.isidentifier()
}
return super()._dir_additions().union(additions)
# ----------------------------------------------------------------------
# Consolidation of internals
def _protect_consolidate(self, f):
"""Consolidate _data -- if the blocks have changed, then clear the
cache
"""
blocks_before = len(self._data.blocks)
result = f()
if len(self._data.blocks) != blocks_before:
self._clear_item_cache()
return result
def _consolidate_inplace(self):
"""Consolidate data in place and return None"""
def f():
self._data = self._data.consolidate()
self._protect_consolidate(f)
def _consolidate(self, inplace=False):
"""
Compute NDFrame with "consolidated" internals (data of each dtype
grouped together in a single ndarray).
Parameters
----------
inplace : bool, default False
If False return new object, otherwise modify existing object.
Returns
-------
consolidated : same type as caller
"""
inplace = validate_bool_kwarg(inplace, "inplace")
if inplace:
self._consolidate_inplace()
else:
f = lambda: self._data.consolidate()
cons_data = self._protect_consolidate(f)
return self._constructor(cons_data).__finalize__(self)
@property
def _is_mixed_type(self):
f = lambda: self._data.is_mixed_type
return self._protect_consolidate(f)
@property
def _is_numeric_mixed_type(self):
f = lambda: self._data.is_numeric_mixed_type
return self._protect_consolidate(f)
@property
def _is_datelike_mixed_type(self):
f = lambda: self._data.is_datelike_mixed_type
return self._protect_consolidate(f)
def _check_inplace_setting(self, value):
""" check whether we allow in-place setting with this type of value """
if self._is_mixed_type:
if not self._is_numeric_mixed_type:
# allow an actual np.nan thru
try:
if np.isnan(value):
return True
except Exception:
pass
raise TypeError(
"Cannot do inplace boolean setting on "
"mixed-types with a non np.nan value"
)
return True
def _get_numeric_data(self):
return self._constructor(self._data.get_numeric_data()).__finalize__(self)
def _get_bool_data(self):
return self._constructor(self._data.get_bool_data()).__finalize__(self)
# ----------------------------------------------------------------------
# Internal Interface Methods
def as_matrix(self, columns=None):
"""
Convert the frame to its Numpy-array representation.
.. deprecated:: 0.23.0
Use :meth:`DataFrame.values` instead.
Parameters
----------
        columns : list, optional, default: None
If None, return all columns, otherwise, returns specified columns.
Returns
-------
values : ndarray
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
See Also
--------
DataFrame.values
Notes
-----
Return is NOT a Numpy-matrix, rather, a Numpy-array.
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By numpy.find_common_type convention, mixing int64 and uint64
will result in a float64 dtype.
This method is provided for backwards compatibility. Generally,
it is recommended to use '.values'.
"""
warnings.warn(
"Method .as_matrix will be removed in a future version. "
"Use .values instead.",
FutureWarning,
stacklevel=2,
)
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED, items=columns)
@property
def values(self):
"""
Return a Numpy representation of the DataFrame.
.. warning::
We recommend using :meth:`DataFrame.to_numpy` instead.
Only the values in the DataFrame will be returned, the axes labels
will be removed.
Returns
-------
numpy.ndarray
The values of the DataFrame.
See Also
--------
DataFrame.to_numpy : Recommended alternative to this method.
DataFrame.index : Retrieve the index labels.
DataFrame.columns : Retrieving the column names.
Notes
-----
The dtype will be a lower-common-denominator dtype (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen. Use this
with care if you are not dealing with the blocks.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
float32. If dtypes are int32 and uint8, dtype will be upcast to
int32. By :func:`numpy.find_common_type` convention, mixing int64
and uint64 will result in a float64 dtype.
Examples
--------
A DataFrame where all columns are the same type (e.g., int64) results
in an array of the same type.
>>> df = pd.DataFrame({'age': [ 3, 29],
... 'height': [94, 170],
... 'weight': [31, 115]})
>>> df
age height weight
0 3 94 31
1 29 170 115
>>> df.dtypes
age int64
height int64
weight int64
dtype: object
>>> df.values
array([[ 3, 94, 31],
[ 29, 170, 115]], dtype=int64)
        A DataFrame with mixed type columns (e.g., str/object, int64, float32)
results in an ndarray of the broadest type that accommodates these
mixed types (e.g., object).
>>> df2 = pd.DataFrame([('parrot', 24.0, 'second'),
... ('lion', 80.5, 1),
... ('monkey', np.nan, None)],
... columns=('name', 'max_speed', 'rank'))
>>> df2.dtypes
name object
max_speed float64
rank object
dtype: object
>>> df2.values
array([['parrot', 24.0, 'second'],
['lion', 80.5, 1],
['monkey', nan, None]], dtype=object)
"""
self._consolidate_inplace()
return self._data.as_array(transpose=self._AXIS_REVERSED)
@property
def _values(self):
"""internal implementation"""
return self.values
@property
def _get_values(self):
# compat
return self.values
def get_values(self):
"""
Return an ndarray after converting sparse values to dense.
.. deprecated:: 0.25.0
Use ``np.asarray(..)`` or :meth:`DataFrame.values` instead.
This is the same as ``.values`` for non-sparse data. For sparse
data contained in a `SparseArray`, the data are first
converted to a dense representation.
Returns
-------
numpy.ndarray
Numpy representation of DataFrame.
See Also
--------
values : Numpy representation of DataFrame.
SparseArray : Container for sparse data.
Examples
--------
>>> df = pd.DataFrame({'a': [1, 2], 'b': [True, False],
... 'c': [1.0, 2.0]})
>>> df
a b c
0 1 True 1.0
1 2 False 2.0
>>> df.get_values()
array([[1, True, 1.0], [2, False, 2.0]], dtype=object)
>>> df = pd.DataFrame({"a": pd.SparseArray([1, None, None]),
... "c": [1.0, 2.0, 3.0]})
>>> df
a c
0 1.0 1.0
1 NaN 2.0
2 NaN 3.0
>>> df.get_values()
array([[ 1., 1.],
[nan, 2.],
[nan, 3.]])
"""
warnings.warn(
"The 'get_values' method is deprecated and will be removed in a "
"future version. Use '.values' or 'np.asarray(..)' instead.",
FutureWarning,
stacklevel=2,
)
return self._internal_get_values()
def _internal_get_values(self):
return self.values
def get_dtype_counts(self):
"""
Return counts of unique dtypes in this object.
.. deprecated:: 0.25.0
Use `.dtypes.value_counts()` instead.
Returns
-------
dtype : Series
Series with the count of columns with each dtype.
See Also
--------
dtypes : Return the dtypes in this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_dtype_counts()
float64 1
int64 1
object 1
dtype: int64
"""
warnings.warn(
"`get_dtype_counts` has been deprecated and will be "
"removed in a future version. For DataFrames use "
"`.dtypes.value_counts()",
FutureWarning,
stacklevel=2,
)
from pandas import Series
return Series(self._data.get_dtype_counts())
def get_ftype_counts(self):
"""
Return counts of unique ftypes in this object.
.. deprecated:: 0.23.0
This is useful for SparseDataFrame or for DataFrames containing
sparse arrays.
Returns
-------
dtype : Series
Series with the count of columns with each type and
sparsity (dense/sparse).
See Also
--------
ftypes : Return ftypes (indication of sparse/dense and dtype) in
this object.
Examples
--------
>>> a = [['a', 1, 1.0], ['b', 2, 2.0], ['c', 3, 3.0]]
>>> df = pd.DataFrame(a, columns=['str', 'int', 'float'])
>>> df
str int float
0 a 1 1.0
1 b 2 2.0
2 c 3 3.0
>>> df.get_ftype_counts() # doctest: +SKIP
float64:dense 1
int64:dense 1
object:dense 1
dtype: int64
"""
warnings.warn(
"get_ftype_counts is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
from pandas import Series
return Series(self._data.get_ftype_counts())
@property
def dtypes(self):
"""
Return the dtypes in the DataFrame.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type of each column.
See Also
--------
DataFrame.ftypes : Dtype and sparsity information.
Examples
--------
>>> df = pd.DataFrame({'float': [1.0],
... 'int': [1],
... 'datetime': [pd.Timestamp('20180310')],
... 'string': ['foo']})
>>> df.dtypes
float float64
int int64
datetime datetime64[ns]
string object
dtype: object
"""
from pandas import Series
return Series(self._data.get_dtypes(), index=self._info_axis, dtype=np.object_)
@property
def ftypes(self):
"""
Return the ftypes (indication of sparse/dense and dtype) in DataFrame.
.. deprecated:: 0.25.0
Use :func:`dtypes` instead.
This returns a Series with the data type of each column.
The result's index is the original DataFrame's columns. Columns
with mixed types are stored with the ``object`` dtype. See
:ref:`the User Guide <basics.dtypes>` for more.
Returns
-------
pandas.Series
The data type and indication of sparse/dense of each column.
See Also
--------
DataFrame.dtypes: Series with just dtype information.
SparseDataFrame : Container for sparse tabular data.
Notes
-----
Sparse data should have the same dtypes as its dense representation.
Examples
--------
>>> arr = np.random.RandomState(0).randn(100, 4)
>>> arr[arr < .8] = np.nan
>>> pd.DataFrame(arr).ftypes
0 float64:dense
1 float64:dense
2 float64:dense
3 float64:dense
dtype: object
>>> pd.SparseDataFrame(arr).ftypes # doctest: +SKIP
0 float64:sparse
1 float64:sparse
2 float64:sparse
3 float64:sparse
dtype: object
"""
warnings.warn(
"DataFrame.ftypes is deprecated and will "
"be removed in a future version. "
"Use DataFrame.dtypes instead.",
FutureWarning,
stacklevel=2,
)
from pandas import Series
return Series(self._data.get_ftypes(), index=self._info_axis, dtype=np.object_)
def as_blocks(self, copy=True):
"""
Convert the frame to a dict of dtype -> Constructor Types that each has
a homogeneous dtype.
.. deprecated:: 0.21.0
NOTE: the dtypes of the blocks WILL BE PRESERVED HERE (unlike in
as_matrix)
Parameters
----------
copy : bool, default True
Returns
-------
dict
Mapping dtype -> Constructor Types.
"""
warnings.warn(
"as_blocks is deprecated and will be removed in a future version",
FutureWarning,
stacklevel=2,
)
return self._to_dict_of_blocks(copy=copy)
@property
def blocks(self):
"""
Internal property, property synonym for as_blocks().
.. deprecated:: 0.21.0
"""
return self.as_blocks()
def _to_dict_of_blocks(self, copy=True):
"""
Return a dict of dtype -> Constructor Types that
each is a homogeneous dtype.
Internal ONLY
"""
return {
k: self._constructor(v).__finalize__(self)
            for k, v in self._data.to_dict(copy=copy).items()
}
def astype(self, dtype, copy=True, errors="raise", **kwargs):
"""
Cast a pandas object to a specified dtype ``dtype``.
Parameters
----------
dtype : data type, or dict of column name -> data type
Use a numpy.dtype or Python type to cast entire pandas object to
the same type. Alternatively, use {col: dtype, ...}, where col is a
column label and dtype is a numpy.dtype or Python type to cast one
or more of the DataFrame's columns to column-specific types.
copy : bool, default True
Return a copy when ``copy=True`` (be very careful setting
``copy=False`` as changes to values then may propagate to other
pandas objects).
errors : {'raise', 'ignore'}, default 'raise'
Control raising of exceptions on invalid data for provided dtype.
- ``raise`` : allow exceptions to be raised
- ``ignore`` : suppress exceptions. On error return original object
.. versionadded:: 0.20.0
kwargs : keyword arguments to pass on to the constructor
Returns
-------
casted : same type as caller
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to a numeric type.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
Create a DataFrame:
>>> d = {'col1': [1, 2], 'col2': [3, 4]}
>>> df = pd.DataFrame(data=d)
>>> df.dtypes
col1 int64
col2 int64
dtype: object
Cast all columns to int32:
>>> df.astype('int32').dtypes
col1 int32
col2 int32
dtype: object
Cast col1 to int32 using a dictionary:
>>> df.astype({'col1': 'int32'}).dtypes
col1 int32
col2 int64
dtype: object
Create a series:
>>> ser = pd.Series([1, 2], dtype='int32')
>>> ser
0 1
1 2
dtype: int32
>>> ser.astype('int64')
0 1
1 2
dtype: int64
Convert to categorical type:
>>> ser.astype('category')
0 1
1 2
dtype: category
Categories (2, int64): [1, 2]
Convert to ordered categorical type with custom ordering:
>>> cat_dtype = pd.api.types.CategoricalDtype(
... categories=[2, 1], ordered=True)
>>> ser.astype(cat_dtype)
0 1
1 2
dtype: category
Categories (2, int64): [2 < 1]
Note that using ``copy=False`` and changing data on a new
pandas object may propagate changes:
>>> s1 = pd.Series([1,2])
>>> s2 = s1.astype('int64', copy=False)
>>> s2[0] = 10
>>> s1 # note that s1[0] has changed too
0 10
1 2
dtype: int64
"""
if is_dict_like(dtype):
if self.ndim == 1: # i.e. Series
if len(dtype) > 1 or self.name not in dtype:
raise KeyError(
"Only the Series name can be used for "
"the key in Series dtype mappings."
)
new_type = dtype[self.name]
return self.astype(new_type, copy, errors, **kwargs)
for col_name in dtype.keys():
if col_name not in self:
raise KeyError(
"Only a column name can be used for the "
"key in a dtype mappings argument."
)
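            # Cast column by column; columns not named in the dtype mapping are
            # passed through unchanged (copied when copy=True).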
results = []
for col_name, col in self.items():
if col_name in dtype:
results.append(
col.astype(
dtype=dtype[col_name], copy=copy, errors=errors, **kwargs
)
)
else:
                    results.append(col.copy() if copy else col)
elif is_extension_array_dtype(dtype) and self.ndim > 1:
# GH 18099/22869: columnwise conversion to extension dtype
# GH 24704: use iloc to handle duplicate column names
results = (
self.iloc[:, i].astype(dtype, copy=copy)
for i in range(len(self.columns))
)
else:
# else, only a single dtype is given
new_data = self._data.astype(
dtype=dtype, copy=copy, errors=errors, **kwargs
)
return self._constructor(new_data).__finalize__(self)
# GH 19920: retain column metadata after concat
result = pd.concat(results, axis=1, copy=False)
result.columns = self.columns
return result
def copy(self, deep=True):
"""
Make a copy of this object's indices and data.
When ``deep=True`` (default), a new object will be created with a
copy of the calling object's data and indices. Modifications to
the data or indices of the copy will not be reflected in the
original object (see notes below).
When ``deep=False``, a new object will be created without copying
the calling object's data or index (only references to the data
and index are copied). Any changes to the data of the original
will be reflected in the shallow copy (and vice versa).
Parameters
----------
deep : bool, default True
Make a deep copy, including a copy of the data and the indices.
With ``deep=False`` neither the indices nor the data are copied.
Returns
-------
copy : Series or DataFrame
Object type matches caller.
Notes
-----
When ``deep=True``, data is copied but actual Python objects
will not be copied recursively, only the reference to the object.
This is in contrast to `copy.deepcopy` in the Standard Library,
which recursively copies object data (see examples below).
While ``Index`` objects are copied when ``deep=True``, the underlying
numpy array is not copied for performance reasons. Since ``Index`` is
immutable, the underlying data can be safely shared and a copy
is not needed.
Examples
--------
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> s
a 1
b 2
dtype: int64
>>> s_copy = s.copy()
>>> s_copy
a 1
b 2
dtype: int64
**Shallow copy versus default (deep) copy:**
>>> s = pd.Series([1, 2], index=["a", "b"])
>>> deep = s.copy()
>>> shallow = s.copy(deep=False)
Shallow copy shares data and index with original.
>>> s is shallow
False
>>> s.values is shallow.values and s.index is shallow.index
True
Deep copy has own copy of data and index.
>>> s is deep
False
>>> s.values is deep.values or s.index is deep.index
False
Updates to the data shared by shallow copy and original is reflected
in both; deep copy remains unchanged.
>>> s[0] = 3
>>> shallow[1] = 4
>>> s
a 3
b 4
dtype: int64
>>> shallow
a 3
b 4
dtype: int64
>>> deep
a 1
b 2
dtype: int64
Note that when copying an object containing Python objects, a deep copy
will copy the data, but will not do so recursively. Updating a nested
data object will be reflected in the deep copy.
>>> s = pd.Series([[1, 2], [3, 4]])
>>> deep = s.copy()
>>> s[0][0] = 10
>>> s
0 [10, 2]
1 [3, 4]
dtype: object
>>> deep
0 [10, 2]
1 [3, 4]
dtype: object
"""
data = self._data.copy(deep=deep)
return self._constructor(data).__finalize__(self)
def __copy__(self, deep=True):
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
"""
Parameters
----------
memo, default None
Standard signature. Unused
"""
if memo is None:
memo = {}
return self.copy(deep=True)
def _convert(
self, datetime=False, numeric=False, timedelta=False, coerce=False, copy=True
):
"""
Attempt to infer better dtype for object columns
Parameters
----------
datetime : bool, default False
If True, convert to date where possible.
numeric : bool, default False
If True, attempt to convert to numbers (including strings), with
unconvertible values becoming NaN.
timedelta : bool, default False
If True, convert to timedelta where possible.
coerce : bool, default False
If True, force conversion with unconvertible values converted to
nulls (NaN or NaT).
copy : bool, default True
If True, return a copy even if no copy is necessary (e.g. no
conversion was done). Note: This is meant for internal use, and
should not be confused with inplace.
Returns
-------
converted : same as input object
"""
validate_bool_kwarg(datetime, "datetime")
validate_bool_kwarg(numeric, "numeric")
validate_bool_kwarg(timedelta, "timedelta")
validate_bool_kwarg(coerce, "coerce")
validate_bool_kwarg(copy, "copy")
return self._constructor(
self._data.convert(
datetime=datetime,
numeric=numeric,
timedelta=timedelta,
coerce=coerce,
copy=copy,
)
).__finalize__(self)
def infer_objects(self):
"""
Attempt to infer better dtypes for object columns.
Attempts soft conversion of object-dtyped
columns, leaving non-object and unconvertible
columns unchanged. The inference rules are the
same as during normal Series/DataFrame construction.
.. versionadded:: 0.21.0
Returns
-------
converted : same type as input object
See Also
--------
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
to_numeric : Convert argument to numeric type.
Examples
--------
>>> df = pd.DataFrame({"A": ["a", 1, 2, 3]})
>>> df = df.iloc[1:]
>>> df
A
1 1
2 2
3 3
>>> df.dtypes
A object
dtype: object
>>> df.infer_objects().dtypes
A int64
dtype: object
"""
# numeric=False necessary to only soft convert;
# python objects will still be converted to
# native numpy numeric types
return self._constructor(
self._data.convert(
datetime=True, numeric=False, timedelta=True, coerce=False, copy=True
)
).__finalize__(self)
# ----------------------------------------------------------------------
# Filling NA's
def fillna(
self,
value=None,
method=None,
axis=None,
inplace=False,
limit=None,
downcast=None,
):
"""
Fill NA/NaN values using the specified method.
Parameters
----------
value : scalar, dict, Series, or DataFrame
Value to use to fill holes (e.g. 0), alternately a
dict/Series/DataFrame of values specifying which value to use for
each index (for a Series) or column (for a DataFrame). Values not
in the dict/Series/DataFrame will not be filled. This value cannot
be a list.
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use next valid observation to fill gap.
axis : %(axes_single_arg)s
Axis along which to fill missing values.
inplace : bool, default False
If True, fill in-place. Note: this will modify any
other views on this object (e.g., a no-copy slice for a column in a
DataFrame).
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
downcast : dict, default is None
A dict of item->dtype of what to downcast if possible,
or the string 'infer' which will try to downcast to an appropriate
equal type (e.g. float64 to int64 if possible).
Returns
-------
%(klass)s
Object with missing values filled.
See Also
--------
interpolate : Fill NaN values using interpolation.
reindex : Conform object to new index.
asfreq : Convert TimeSeries to specified frequency.
Examples
--------
>>> df = pd.DataFrame([[np.nan, 2, np.nan, 0],
... [3, 4, np.nan, 1],
... [np.nan, np.nan, np.nan, 5],
... [np.nan, 3, np.nan, 4]],
... columns=list('ABCD'))
>>> df
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 NaN NaN NaN 5
3 NaN 3.0 NaN 4
Replace all NaN elements with 0s.
>>> df.fillna(0)
A B C D
0 0.0 2.0 0.0 0
1 3.0 4.0 0.0 1
2 0.0 0.0 0.0 5
3 0.0 3.0 0.0 4
We can also propagate non-null values forward or backward.
>>> df.fillna(method='ffill')
A B C D
0 NaN 2.0 NaN 0
1 3.0 4.0 NaN 1
2 3.0 4.0 NaN 5
3 3.0 3.0 NaN 4
Replace all NaN elements in column 'A', 'B', 'C', and 'D', with 0, 1,
2, and 3 respectively.
>>> values = {'A': 0, 'B': 1, 'C': 2, 'D': 3}
>>> df.fillna(value=values)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 2.0 1
2 0.0 1.0 2.0 5
3 0.0 3.0 2.0 4
Only replace the first NaN element.
>>> df.fillna(value=values, limit=1)
A B C D
0 0.0 2.0 2.0 0
1 3.0 4.0 NaN 1
2 NaN 1.0 NaN 5
3 NaN 3.0 NaN 4
"""
inplace = validate_bool_kwarg(inplace, "inplace")
value, method = validate_fillna_kwargs(value, method)
self._consolidate_inplace()
        # set the default here, so functions examining the signature
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
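        # With no fill value, filling is delegated to the block-level
        # interpolate machinery using the pad/backfill method.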
if value is None:
if self._is_mixed_type and axis == 1:
if inplace:
raise NotImplementedError()
result = self.T.fillna(method=method, limit=limit).T
# need to downcast here because of all of the transposes
result._data = result._data.downcast()
return result
new_data = self._data.interpolate(
method=method,
axis=axis,
limit=limit,
inplace=inplace,
coerce=True,
downcast=downcast,
)
else:
if len(self._get_axis(axis)) == 0:
return self
if self.ndim == 1:
if isinstance(value, (dict, ABCSeries)):
from pandas import Series
value = Series(value)
elif not is_list_like(value):
pass
else:
raise TypeError(
'"value" parameter must be a scalar, dict '
"or Series, but you passed a "
'"{0}"'.format(type(value).__name__)
)
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, (dict, ABCSeries)):
if axis == 1:
raise NotImplementedError(
"Currently only can fill "
"with dict/Series column "
"by column"
)
result = self if inplace else self.copy()
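            # Fill column by column; keys in `value` that are not columns of
            # the frame are silently skipped.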
for k, v in value.items():
if k not in result:
continue
obj = result[k]
obj.fillna(v, limit=limit, inplace=True, downcast=downcast)
return result if not inplace else None
elif not is_list_like(value):
new_data = self._data.fillna(
value=value, limit=limit, inplace=inplace, downcast=downcast
)
elif isinstance(value, ABCDataFrame) and self.ndim == 2:
new_data = self.where(self.notna(), value)
else:
raise ValueError("invalid fill value with a %s" % type(value))
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna` with ``method='ffill'``.
Returns
-------
%(klass)s
Object with missing values filled.
"""
return self.fillna(
method="ffill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
"""
Synonym for :meth:`DataFrame.fillna` with ``method='bfill'``.
Returns
-------
%(klass)s
Object with missing values filled.
"""
return self.fillna(
method="bfill", axis=axis, inplace=inplace, limit=limit, downcast=downcast
)
_shared_docs[
"replace"
] = """
Replace values given in `to_replace` with `value`.
Values of the %(klass)s are replaced with other values dynamically.
This differs from updating with ``.loc`` or ``.iloc``, which require
you to specify a location to update with some value.
Parameters
----------
to_replace : str, regex, list, dict, Series, int, float, or None
How to find the values that will be replaced.
* numeric, str or regex:
- numeric: numeric values equal to `to_replace` will be
replaced with `value`
- str: string exactly matching `to_replace` will be replaced
with `value`
- regex: regexs matching `to_replace` will be replaced with
`value`
* list of str, regex, or numeric:
- First, if `to_replace` and `value` are both lists, they
**must** be the same length.
- Second, if ``regex=True`` then all of the strings in **both**
lists will be interpreted as regexs otherwise they will match
directly. This doesn't matter much for `value` since there
are only a few possible substitution regexes you can use.
- str, regex and numeric rules apply as above.
* dict:
- Dicts can be used to specify different replacement values
for different existing values. For example,
``{'a': 'b', 'y': 'z'}`` replaces the value 'a' with 'b' and
'y' with 'z'. To use a dict in this way the `value`
parameter should be `None`.
- For a DataFrame a dict can specify that different values
should be replaced in different columns. For example,
``{'a': 1, 'b': 'z'}`` looks for the value 1 in column 'a'
and the value 'z' in column 'b' and replaces these values
with whatever is specified in `value`. The `value` parameter
should not be ``None`` in this case. You can treat this as a
special case of passing two lists except that you are
specifying the column to search in.
- For a DataFrame nested dictionaries, e.g.,
``{'a': {'b': np.nan}}``, are read as follows: look in column
'a' for the value 'b' and replace it with NaN. The `value`
parameter should be ``None`` to use a nested dict in this
way. You can nest regular expressions as well. Note that
column names (the top-level dictionary keys in a nested
dictionary) **cannot** be regular expressions.
* None:
- This means that the `regex` argument must be a string,
compiled regular expression, or list, dict, ndarray or
Series of such elements. If `value` is also ``None`` then
this **must** be a nested dictionary or Series.
See the examples section for examples of each of these.
value : scalar, dict, list, str, regex, default None
Value to replace any values matching `to_replace` with.
For a DataFrame a dict of values can be used to specify which
value to use for each column (columns not in the dict will not be
filled). Regular expressions, strings and lists or dicts of such
objects are also allowed.
inplace : bool, default False
        If True, performs the replacement in place. Note: this will modify any
other views on this object (e.g. a column from a DataFrame).
Returns the caller if this is True.
limit : int, default None
Maximum size gap to forward or backward fill.
regex : bool or same types as `to_replace`, default False
Whether to interpret `to_replace` and/or `value` as regular
expressions. If this is ``True`` then `to_replace` *must* be a
string. Alternatively, this could be a regular expression or a
list, dict, or array of regular expressions in which case
`to_replace` must be ``None``.
method : {'pad', 'ffill', 'bfill', `None`}
        The method to use for replacement, when `to_replace` is a
scalar, list or tuple and `value` is ``None``.
.. versionchanged:: 0.23.0
Added to DataFrame.
Returns
-------
%(klass)s
Object after replacement.
Raises
------
AssertionError
* If `regex` is not a ``bool`` and `to_replace` is not
``None``.
TypeError
* If `to_replace` is a ``dict`` and `value` is not a ``list``,
``dict``, ``ndarray``, or ``Series``
* If `to_replace` is ``None`` and `regex` is not compilable
into a regular expression or is a list, dict, ndarray, or
Series.
* When replacing multiple ``bool`` or ``datetime64`` objects and
the arguments to `to_replace` does not match the type of the
value being replaced
ValueError
* If a ``list`` or an ``ndarray`` is passed to `to_replace` and
`value` but they are not the same length.
See Also
--------
%(klass)s.fillna : Fill NA values.
%(klass)s.where : Replace values based on boolean condition.
Series.str.replace : Simple string replacement.
Notes
-----
* Regex substitution is performed under the hood with ``re.sub``. The
rules for substitution for ``re.sub`` are the same.
* Regular expressions will only substitute on strings, meaning you
cannot provide, for example, a regular expression matching floating
point numbers and expect the columns in your frame that have a
numeric dtype to be matched. However, if those floating point
numbers *are* strings, then you can do this.
* This method has *a lot* of options. You are encouraged to experiment
and play with this method to gain intuition about how it works.
* When dict is used as the `to_replace` value, it is like
key(s) in the dict are the to_replace part and
value(s) in the dict are the value parameter.
Examples
--------
**Scalar `to_replace` and `value`**
>>> s = pd.Series([0, 1, 2, 3, 4])
>>> s.replace(0, 5)
0 5
1 1
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame({'A': [0, 1, 2, 3, 4],
... 'B': [5, 6, 7, 8, 9],
... 'C': ['a', 'b', 'c', 'd', 'e']})
>>> df.replace(0, 5)
A B C
0 5 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
**List-like `to_replace`**
>>> df.replace([0, 1, 2, 3], 4)
A B C
0 4 5 a
1 4 6 b
2 4 7 c
3 4 8 d
4 4 9 e
>>> df.replace([0, 1, 2, 3], [4, 3, 2, 1])
A B C
0 4 5 a
1 3 6 b
2 2 7 c
3 1 8 d
4 4 9 e
>>> s.replace([1, 2], method='bfill')
0 0
1 3
2 3
3 3
4 4
dtype: int64
**dict-like `to_replace`**
>>> df.replace({0: 10, 1: 100})
A B C
0 10 5 a
1 100 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': 0, 'B': 5}, 100)
A B C
0 100 100 a
1 1 6 b
2 2 7 c
3 3 8 d
4 4 9 e
>>> df.replace({'A': {0: 100, 4: 400}})
A B C
0 100 5 a
1 1 6 b
2 2 7 c
3 3 8 d
4 400 9 e
**Regular expression `to_replace`**
>>> df = pd.DataFrame({'A': ['bat', 'foo', 'bait'],
... 'B': ['abc', 'bar', 'xyz']})
>>> df.replace(to_replace=r'^ba.$', value='new', regex=True)
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace({'A': r'^ba.$'}, {'A': 'new'}, regex=True)
A B
0 new abc
1 foo bar
2 bait xyz
>>> df.replace(regex=r'^ba.$', value='new')
A B
0 new abc
1 foo new
2 bait xyz
>>> df.replace(regex={r'^ba.$': 'new', 'foo': 'xyz'})
A B
0 new abc
1 xyz new
2 bait xyz
>>> df.replace(regex=[r'^ba.$', 'foo'], value='new')
A B
0 new abc
1 new new
2 bait xyz
Note that when replacing multiple ``bool`` or ``datetime64`` objects,
the data types in the `to_replace` parameter must match the data
type of the value being replaced:
>>> df = pd.DataFrame({'A': [True, False, True],
... 'B': [False, True, False]})
>>> df.replace({'a string': 'new value', True: False}) # raises
Traceback (most recent call last):
...
TypeError: Cannot compare types 'ndarray(dtype=bool)' and 'str'
This raises a ``TypeError`` because one of the ``dict`` keys is not of
the correct type for replacement.
Compare the behavior of ``s.replace({'a': None})`` and
``s.replace('a', None)`` to understand the peculiarities
of the `to_replace` parameter:
>>> s = pd.Series([10, 'a', 'a', 'b', 'a'])
When one uses a dict as the `to_replace` value, it is like the
value(s) in the dict are equal to the `value` parameter.
``s.replace({'a': None})`` is equivalent to
``s.replace(to_replace={'a': None}, value=None, method=None)``:
>>> s.replace({'a': None})
0 10
1 None
2 None
3 b
4 None
dtype: object
When ``value=None`` and `to_replace` is a scalar, list or
tuple, `replace` uses the method parameter (default 'pad') to do the
replacement. So this is why the 'a' values are being replaced by 10
in rows 1 and 2 and 'b' in row 4 in this case.
The command ``s.replace('a', None)`` is actually equivalent to
``s.replace(to_replace='a', value=None, method='pad')``:
>>> s.replace('a', None)
0 10
1 10
2 10
3 b
4 b
dtype: object
"""
@Appender(_shared_docs["replace"] % _shared_doc_kwargs)
def replace(
self,
to_replace=None,
value=None,
inplace=False,
limit=None,
regex=False,
method="pad",
):
inplace = validate_bool_kwarg(inplace, "inplace")
if not is_bool(regex) and to_replace is not None:
raise AssertionError("'to_replace' must be 'None' if 'regex' is not a bool")
self._consolidate_inplace()
if value is None:
# passing a single value that is scalar like
# when value is None (GH5319), for compat
if not is_dict_like(to_replace) and not is_dict_like(regex):
to_replace = [to_replace]
if isinstance(to_replace, (tuple, list)):
if isinstance(self, ABCDataFrame):
return self.apply(
_single_replace, args=(to_replace, method, inplace, limit)
)
return _single_replace(self, to_replace, method, inplace, limit)
if not is_dict_like(to_replace):
if not is_dict_like(regex):
raise TypeError(
'If "to_replace" and "value" are both None'
' and "to_replace" is not a list, then '
"regex must be a mapping"
)
to_replace = regex
regex = True
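            # A dict-like `to_replace` is split up front: a flat mapping
            # {old: new, ...} becomes parallel key/value sequences, while a
            # nested per-column mapping {col: {old: new}} becomes two
            # per-column dicts (values to find and values to substitute).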
items = list(to_replace.items())
keys, values = zip(*items) if items else ([], [])
are_mappings = [is_dict_like(v) for v in values]
if any(are_mappings):
if not all(are_mappings):
raise TypeError(
"If a nested mapping is passed, all values"
" of the top level mapping must be "
"mappings"
)
# passed a nested dict/Series
to_rep_dict = {}
value_dict = {}
for k, v in items:
keys, values = list(zip(*v.items())) or ([], [])
to_rep_dict[k] = list(keys)
value_dict[k] = list(values)
to_replace, value = to_rep_dict, value_dict
else:
to_replace, value = keys, values
return self.replace(
to_replace, value, inplace=inplace, limit=limit, regex=regex
)
else:
# need a non-zero len on all axes
if not self.size:
return self
new_data = self._data
if is_dict_like(to_replace):
if is_dict_like(value): # {'A' : NA} -> {'A' : 0}
res = self if inplace else self.copy()
for c, src in to_replace.items():
if c in value and c in self:
# object conversion is handled in
# series.replace which is called recursively
res[c] = res[c].replace(
to_replace=src,
value=value[c],
inplace=False,
regex=regex,
)
return None if inplace else res
# {'A': NA} -> 0
elif not is_list_like(value):
keys = [(k, src) for k, src in to_replace.items() if k in self]
keys_len = len(keys) - 1
for i, (k, src) in enumerate(keys):
convert = i == keys_len
new_data = new_data.replace(
to_replace=src,
value=value,
filter=[k],
inplace=inplace,
regex=regex,
convert=convert,
)
else:
raise TypeError("value argument must be scalar, dict, or Series")
elif is_list_like(to_replace): # [NA, ''] -> [0, 'missing']
if is_list_like(value):
if len(to_replace) != len(value):
raise ValueError(
"Replacement lists must match "
"in length. Expecting %d got %d "
% (len(to_replace), len(value))
)
new_data = self._data.replace_list(
src_list=to_replace,
dest_list=value,
inplace=inplace,
regex=regex,
)
else: # [NA, ''] -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
elif to_replace is None:
if not (
is_re_compilable(regex)
or is_list_like(regex)
or is_dict_like(regex)
):
raise TypeError(
"'regex' must be a string or a compiled "
"regular expression or a list or dict of "
"strings or regular expressions, you "
"passed a"
" {0!r}".format(type(regex).__name__)
)
return self.replace(
regex, value, inplace=inplace, limit=limit, regex=True
)
else:
# dest iterable dict-like
if is_dict_like(value): # NA -> {'A' : 0, 'B' : -1}
new_data = self._data
for k, v in value.items():
if k in self:
new_data = new_data.replace(
to_replace=to_replace,
value=v,
filter=[k],
inplace=inplace,
regex=regex,
)
elif not is_list_like(value): # NA -> 0
new_data = self._data.replace(
to_replace=to_replace, value=value, inplace=inplace, regex=regex
)
else:
msg = ('Invalid "to_replace" type: ' "{0!r}").format(
type(to_replace).__name__
)
raise TypeError(msg) # pragma: no cover
if inplace:
self._update_inplace(new_data)
else:
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"interpolate"
] = """
Please note that only ``method='linear'`` is supported for
DataFrame/Series with a MultiIndex.
Parameters
----------
method : str, default 'linear'
Interpolation technique to use. One of:
* 'linear': Ignore the index and treat the values as equally
spaced. This is the only method supported on MultiIndexes.
* 'time': Works on daily and higher resolution data to interpolate
given length of interval.
* 'index', 'values': use the actual numerical values of the index.
* 'pad': Fill in NaNs using existing values.
* 'nearest', 'zero', 'slinear', 'quadratic', 'cubic', 'spline',
'barycentric', 'polynomial': Passed to
`scipy.interpolate.interp1d`. These methods use the numerical
values of the index. Both 'polynomial' and 'spline' require that
you also specify an `order` (int), e.g.
``df.interpolate(method='polynomial', order=5)``.
* 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
Wrappers around the SciPy interpolation methods of similar
names. See `Notes`.
* 'from_derivatives': Refers to
`scipy.interpolate.BPoly.from_derivatives` which
replaces 'piecewise_polynomial' interpolation method in
scipy 0.18.
axis : {0 or 'index', 1 or 'columns', None}, default None
Axis to interpolate along.
limit : int, optional
Maximum number of consecutive NaNs to fill. Must be greater than
0.
inplace : bool, default False
Update the data in place if possible.
limit_direction : {'forward', 'backward', 'both'}, default 'forward'
If limit is specified, consecutive NaNs will be filled in this
direction.
limit_area : {`None`, 'inside', 'outside'}, default None
If limit is specified, consecutive NaNs will be filled with this
restriction.
* ``None``: No fill restriction.
* 'inside': Only fill NaNs surrounded by valid values
(interpolate).
* 'outside': Only fill NaNs outside valid values (extrapolate).
.. versionadded:: 0.23.0
downcast : optional, 'infer' or None, defaults to None
Downcast dtypes if possible.
**kwargs
Keyword arguments to pass on to the interpolating function.
Returns
-------
Series or DataFrame
Returns the same object type as the caller, interpolated at
some or all ``NaN`` values.
See Also
--------
fillna : Fill missing values using different methods.
scipy.interpolate.Akima1DInterpolator : Piecewise cubic polynomials
(Akima interpolator).
scipy.interpolate.BPoly.from_derivatives : Piecewise polynomial in the
Bernstein basis.
scipy.interpolate.interp1d : Interpolate a 1-D function.
scipy.interpolate.KroghInterpolator : Interpolate polynomial (Krogh
interpolator).
scipy.interpolate.PchipInterpolator : PCHIP 1-d monotonic cubic
interpolation.
scipy.interpolate.CubicSpline : Cubic spline data interpolator.
Notes
-----
The 'krogh', 'piecewise_polynomial', 'spline', 'pchip' and 'akima'
methods are wrappers around the respective SciPy implementations of
similar names. These use the actual numerical values of the index.
For more information on their behavior, see the
`SciPy documentation
<http://docs.scipy.org/doc/scipy/reference/interpolate.html#univariate-interpolation>`__
and `SciPy tutorial
<http://docs.scipy.org/doc/scipy/reference/tutorial/interpolate.html>`__.
Examples
--------
Filling in ``NaN`` in a :class:`~pandas.Series` via linear
interpolation.
>>> s = pd.Series([0, 1, np.nan, 3])
>>> s
0 0.0
1 1.0
2 NaN
3 3.0
dtype: float64
>>> s.interpolate()
0 0.0
1 1.0
2 2.0
3 3.0
dtype: float64
Filling in ``NaN`` in a Series by padding, but filling at most two
consecutive ``NaN`` at a time.
>>> s = pd.Series([np.nan, "single_one", np.nan,
... "fill_two_more", np.nan, np.nan, np.nan,
... 4.71, np.nan])
>>> s
0 NaN
1 single_one
2 NaN
3 fill_two_more
4 NaN
5 NaN
6 NaN
7 4.71
8 NaN
dtype: object
>>> s.interpolate(method='pad', limit=2)
0 NaN
1 single_one
2 single_one
3 fill_two_more
4 fill_two_more
5 fill_two_more
6 NaN
7 4.71
8 4.71
dtype: object
Filling in ``NaN`` in a Series via polynomial interpolation or splines:
Both 'polynomial' and 'spline' methods require that you also specify
an ``order`` (int).
>>> s = pd.Series([0, 2, np.nan, 8])
>>> s.interpolate(method='polynomial', order=2)
0 0.000000
1 2.000000
2 4.666667
3 8.000000
dtype: float64
Fill the DataFrame forward (that is, going down) along each column
using linear interpolation.
Note how the last entry in column 'a' is interpolated differently,
because there is no entry after it to use for interpolation.
Note how the first entry in column 'b' remains ``NaN``, because there
is no entry before it to use for interpolation.
>>> df = pd.DataFrame([(0.0, np.nan, -1.0, 1.0),
... (np.nan, 2.0, np.nan, np.nan),
... (2.0, 3.0, np.nan, 9.0),
... (np.nan, 4.0, -4.0, 16.0)],
... columns=list('abcd'))
>>> df
a b c d
0 0.0 NaN -1.0 1.0
1 NaN 2.0 NaN NaN
2 2.0 3.0 NaN 9.0
3 NaN 4.0 -4.0 16.0
>>> df.interpolate(method='linear', limit_direction='forward', axis=0)
a b c d
0 0.0 NaN -1.0 1.0
1 1.0 2.0 -2.0 5.0
2 2.0 3.0 -3.0 9.0
3 2.0 4.0 -4.0 16.0
Using polynomial interpolation.
>>> df['d'].interpolate(method='polynomial', order=2)
0 1.0
1 4.0
2 9.0
3 16.0
Name: d, dtype: float64
"""
@Appender(_shared_docs["interpolate"] % _shared_doc_kwargs)
def interpolate(
self,
method="linear",
axis=0,
limit=None,
inplace=False,
limit_direction="forward",
limit_area=None,
downcast=None,
**kwargs
):
"""
Interpolate values according to different methods.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
        # normalize axis up front so that string aliases (e.g. 'index') and
        # invalid values are handled before branching
        axis = self._get_axis_number(axis)
        if axis == 0:
            ax = self._info_axis_name
            _maybe_transposed_self = self
        elif axis == 1:
            _maybe_transposed_self = self.T
            ax = 1
        ax = _maybe_transposed_self._get_axis_number(ax)
if _maybe_transposed_self.ndim == 2:
alt_ax = 1 - ax
else:
alt_ax = ax
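        # ``ax`` is the axis the block manager interpolates along; ``alt_ax``
        # picks the axis whose labels supply the x-values (the row index when a
        # DataFrame is interpolated column-wise).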
if isinstance(_maybe_transposed_self.index, MultiIndex) and method != "linear":
raise ValueError(
"Only `method=linear` interpolation is supported on MultiIndexes."
)
if _maybe_transposed_self._data.get_dtype_counts().get("object") == len(
_maybe_transposed_self.T
):
raise TypeError(
"Cannot interpolate with all object-dtype columns "
"in the DataFrame. Try setting at least one "
"column to a numeric dtype."
)
# create/use the index
if method == "linear":
# prior default
index = np.arange(len(_maybe_transposed_self._get_axis(alt_ax)))
else:
index = _maybe_transposed_self._get_axis(alt_ax)
methods = {"index", "values", "nearest", "time"}
is_numeric_or_datetime = (
is_numeric_dtype(index)
or is_datetime64_any_dtype(index)
or is_timedelta64_dtype(index)
)
if method not in methods and not is_numeric_or_datetime:
raise ValueError(
"Index column must be numeric or datetime type when "
"using {method} method other than linear. "
"Try setting a numeric or datetime index column before "
"interpolating.".format(method=method)
)
if isna(index).any():
raise NotImplementedError(
"Interpolation with NaNs in the index "
"has not been implemented. Try filling "
"those NaNs before interpolating."
)
data = _maybe_transposed_self._data
new_data = data.interpolate(
method=method,
axis=ax,
index=index,
values=_maybe_transposed_self,
limit=limit,
limit_direction=limit_direction,
limit_area=limit_area,
inplace=inplace,
downcast=downcast,
**kwargs
)
if inplace:
if axis == 1:
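                # the work was done on the transposed object; transpose the
                # result back before writing it into self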
new_data = self._constructor(new_data).T._data
self._update_inplace(new_data)
else:
res = self._constructor(new_data).__finalize__(self)
if axis == 1:
res = res.T
return res
# ----------------------------------------------------------------------
    # Timeseries methods
def asof(self, where, subset=None):
"""
Return the last row(s) without any NaNs before `where`.
The last row (for each element in `where`, if list) without any
NaN is taken.
In case of a :class:`~pandas.DataFrame`, the last row without NaN
considering only the subset of columns (if not `None`)
If there is no good value, NaN is returned for a Series or
a Series of NaN values for a DataFrame
Parameters
----------
where : date or array-like of dates
Date(s) before which the last row(s) are returned.
subset : str or array-like of str, default `None`
For DataFrame, if not `None`, only use these columns to
check for NaNs.
Returns
-------
scalar, Series, or DataFrame
The return can be:
* scalar : when `self` is a Series and `where` is a scalar
* Series: when `self` is a Series and `where` is an array-like,
or when `self` is a DataFrame and `where` is a scalar
* DataFrame : when `self` is a DataFrame and `where` is an
array-like
Return scalar, Series, or DataFrame.
See Also
--------
merge_asof : Perform an asof merge. Similar to left join.
Notes
-----
Dates are assumed to be sorted. Raises if this is not the case.
Examples
--------
A Series and a scalar `where`.
>>> s = pd.Series([1, 2, np.nan, 4], index=[10, 20, 30, 40])
>>> s
10 1.0
20 2.0
30 NaN
40 4.0
dtype: float64
>>> s.asof(20)
2.0
For a sequence `where`, a Series is returned. The first value is
NaN, because the first element of `where` is before the first
index value.
>>> s.asof([5, 20])
5 NaN
20 2.0
dtype: float64
Missing values are not considered. The following is ``2.0``, not
NaN, even though NaN is at the index location for ``30``.
>>> s.asof(30)
2.0
Take all columns into consideration
>>> df = pd.DataFrame({'a': [10, 20, 30, 40, 50],
... 'b': [None, None, None, None, 500]},
... index=pd.DatetimeIndex(['2018-02-27 09:01:00',
... '2018-02-27 09:02:00',
... '2018-02-27 09:03:00',
... '2018-02-27 09:04:00',
... '2018-02-27 09:05:00']))
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']))
a b
2018-02-27 09:03:30 NaN NaN
2018-02-27 09:04:30 NaN NaN
Take a single column into consideration
>>> df.asof(pd.DatetimeIndex(['2018-02-27 09:03:30',
... '2018-02-27 09:04:30']),
... subset=['a'])
a b
2018-02-27 09:03:30 30.0 NaN
2018-02-27 09:04:30 40.0 NaN
"""
if isinstance(where, str):
where = Timestamp(where)
if not self.index.is_monotonic:
raise ValueError("asof requires a sorted index")
is_series = isinstance(self, ABCSeries)
if is_series:
if subset is not None:
raise ValueError("subset is not valid for Series")
else:
if subset is None:
subset = self.columns
if not is_list_like(subset):
subset = [subset]
is_list = is_list_like(where)
if not is_list:
start = self.index[0]
if isinstance(self.index, PeriodIndex):
where = Period(where, freq=self.index.freq).ordinal
start = start.ordinal
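            # `where` earlier than the first index entry means there is no
            # prior observation: return NaN (an all-NaN row for a DataFrame).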
if where < start:
if not is_series:
from pandas import Series
return Series(index=self.columns, name=where)
return np.nan
# It's always much faster to use a *while* loop here for
# Series than pre-computing all the NAs. However a
# *while* loop is extremely expensive for DataFrame
# so we later pre-compute all the NAs and use the same
# code path whether *where* is a scalar or list.
# See PR: https://github.com/pandas-dev/pandas/pull/14476
if is_series:
loc = self.index.searchsorted(where, side="right")
if loc > 0:
loc -= 1
values = self._values
while loc > 0 and isna(values[loc]):
loc -= 1
return values[loc]
if not isinstance(where, Index):
where = Index(where) if is_list else Index([where])
nulls = self.isna() if is_series else self[subset].isna().any(1)
if nulls.all():
if is_series:
return self._constructor(np.nan, index=where, name=self.name)
elif is_list:
from pandas import DataFrame
return DataFrame(np.nan, index=where, columns=self.columns)
else:
from pandas import Series
return Series(np.nan, index=self.columns, name=where[0])
locs = self.index.asof_locs(where, ~(nulls.values))
# mask the missing
missing = locs == -1
data = self.take(locs, is_copy=False)
data.index = where
data.loc[missing] = np.nan
return data if is_list else data.iloc[-1]
# ----------------------------------------------------------------------
# Action Methods
_shared_docs[
"isna"
] = """
Detect missing values.
Return a boolean same-sized object indicating if the values are NA.
    NA values, such as None or :attr:`numpy.NaN`, get mapped to True
values.
Everything else gets mapped to False values. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.isnull : Alias of isna.
%(klass)s.notna : Boolean inverse of isna.
%(klass)s.dropna : Omit axes labels with missing values.
isna : Top-level isna.
Examples
--------
Show which entries in a DataFrame are NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.isna()
age born name toy
0 False True False True
1 False False False False
2 True False False False
Show which entries in a Series are NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.isna()
0 False
1 False
2 True
dtype: bool
"""
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isna(self):
return isna(self).__finalize__(self)
@Appender(_shared_docs["isna"] % _shared_doc_kwargs)
def isnull(self):
return isna(self).__finalize__(self)
_shared_docs[
"notna"
] = """
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. Characters such as empty
strings ``''`` or :attr:`numpy.inf` are not considered NA values
(unless you set ``pandas.options.mode.use_inf_as_na = True``).
NA values, such as None or :attr:`numpy.NaN`, get mapped to False
values.
Returns
-------
%(klass)s
Mask of bool values for each element in %(klass)s that
indicates whether an element is not an NA value.
See Also
--------
%(klass)s.notnull : Alias of notna.
%(klass)s.isna : Boolean inverse of notna.
%(klass)s.dropna : Omit axes labels with missing values.
notna : Top-level notna.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = pd.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notna()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = pd.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ser.notna()
0 True
1 True
2 False
dtype: bool
"""
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notna(self):
return notna(self).__finalize__(self)
@Appender(_shared_docs["notna"] % _shared_doc_kwargs)
def notnull(self):
return notna(self).__finalize__(self)
def _clip_with_scalar(self, lower, upper, inplace=False):
if (lower is not None and np.any(isna(lower))) or (
upper is not None and np.any(isna(upper))
):
raise ValueError("Cannot use an NA value as a clip threshold")
result = self
mask = isna(self.values)
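        # comparisons against NaN are False, so where() below would replace
        # NaNs with the bound; remember their positions and restore NaN after.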
with np.errstate(all="ignore"):
if upper is not None:
subset = self.to_numpy() <= upper
result = result.where(subset, upper, axis=None, inplace=False)
if lower is not None:
subset = self.to_numpy() >= lower
result = result.where(subset, lower, axis=None, inplace=False)
if np.any(mask):
result[mask] = np.nan
if inplace:
self._update_inplace(result)
else:
return result
def _clip_with_one_bound(self, threshold, method, axis, inplace):
if axis is not None:
axis = self._get_axis_number(axis)
# method is self.le for upper bound and self.ge for lower bound
if is_scalar(threshold) and is_number(threshold):
if method.__name__ == "le":
return self._clip_with_scalar(None, threshold, inplace=inplace)
return self._clip_with_scalar(threshold, None, inplace=inplace)
subset = method(threshold, axis=axis) | isna(self)
# GH #15390
# In order for where method to work, the threshold must
# be transformed to NDFrame from other array like structure.
if (not isinstance(threshold, ABCSeries)) and is_list_like(threshold):
if isinstance(self, ABCSeries):
threshold = self._constructor(threshold, index=self.index)
else:
threshold = _align_method_FRAME(self, threshold, axis)
return self.where(subset, threshold, axis=axis, inplace=inplace)
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args, **kwargs):
"""
Trim values at input threshold(s).
Assigns values outside boundary to boundary values. Thresholds
can be singular values or array like, and in the latter case
the clipping is performed element-wise in the specified axis.
Parameters
----------
lower : float or array_like, default None
Minimum threshold value. All values below this
threshold will be set to it.
upper : float or array_like, default None
Maximum threshold value. All values above this
threshold will be set to it.
axis : int or str axis name, optional
Align object with lower and upper along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
*args, **kwargs
Additional keywords have no effect but might be accepted
for compatibility with numpy.
Returns
-------
Series or DataFrame
Same type as calling object with the values outside the
clip boundaries replaced.
Examples
--------
>>> data = {'col_0': [9, -3, 0, -1, 5], 'col_1': [-2, -7, 6, 8, -5]}
>>> df = pd.DataFrame(data)
>>> df
col_0 col_1
0 9 -2
1 -3 -7
2 0 6
3 -1 8
4 5 -5
Clips per column using lower and upper thresholds:
>>> df.clip(-4, 6)
col_0 col_1
0 6 -2
1 -3 -4
2 0 6
3 -1 6
4 5 -4
Clips using specific lower and upper thresholds per column element:
>>> t = pd.Series([2, -4, -1, 6, 3])
>>> t
0 2
1 -4
2 -1
3 6
4 3
dtype: int64
>>> df.clip(t, t + 4, axis=0)
col_0 col_1
0 6 2
1 -3 -4
2 0 3
3 6 8
4 5 3
"""
inplace = validate_bool_kwarg(inplace, "inplace")
axis = nv.validate_clip_with_axis(axis, args, kwargs)
if axis is not None:
axis = self._get_axis_number(axis)
# GH 17276
# numpy doesn't like NaN as a clip value
# so ignore
# GH 19992
# numpy doesn't drop a list-like bound containing NaN
if not is_list_like(lower) and np.any(isna(lower)):
lower = None
if not is_list_like(upper) and np.any(isna(upper)):
upper = None
# GH 2747 (arguments were reversed)
if lower is not None and upper is not None:
if is_scalar(lower) and is_scalar(upper):
lower, upper = min(lower, upper), max(lower, upper)
# fast-path for scalars
if (lower is None or (is_scalar(lower) and is_number(lower))) and (
upper is None or (is_scalar(upper) and is_number(upper))
):
return self._clip_with_scalar(lower, upper, inplace=inplace)
result = self
if lower is not None:
result = result._clip_with_one_bound(
lower, method=self.ge, axis=axis, inplace=inplace
)
if upper is not None:
if inplace:
result = self
result = result._clip_with_one_bound(
upper, method=self.le, axis=axis, inplace=inplace
)
return result
def clip_upper(self, threshold, axis=None, inplace=False):
"""
Trim values above a given threshold.
.. deprecated:: 0.24.0
Use clip(upper=threshold) instead.
Elements above the `threshold` will be changed to match the
`threshold` value(s). The threshold can be a single value or an array;
in the latter case the truncation is performed element-wise.
Parameters
----------
threshold : numeric or array-like
Maximum value allowed. All values above threshold will be set to
this value.
* float : every value is compared to `threshold`.
* array-like : The shape of `threshold` should match the object
it's compared to. When `self` is a Series, `threshold` should be
the same length as `self`. When `self` is a DataFrame, `threshold`
should be 2-D and the same shape as `self` for ``axis=None``, or
1-D and the same length as the axis being compared.
axis : {0 or 'index', 1 or 'columns'}, default 0
Align object with `threshold` along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
Returns
-------
Series or DataFrame
Original data with values trimmed.
See Also
--------
Series.clip : General purpose method to trim Series values to given
threshold(s).
DataFrame.clip : General purpose method to trim DataFrame values to
given threshold(s).
Examples
--------
>>> s = pd.Series([1, 2, 3, 4, 5])
>>> s
0 1
1 2
2 3
3 4
4 5
dtype: int64
>>> s.clip(upper=3)
0 1
1 2
2 3
3 3
4 3
dtype: int64
>>> elemwise_thresholds = [5, 4, 3, 2, 1]
>>> elemwise_thresholds
[5, 4, 3, 2, 1]
>>> s.clip(upper=elemwise_thresholds)
0 1
1 2
2 3
3 2
4 1
dtype: int64
"""
warnings.warn(
"clip_upper(threshold) is deprecated, use clip(upper=threshold) instead",
FutureWarning,
stacklevel=2,
)
return self._clip_with_one_bound(
threshold, method=self.le, axis=axis, inplace=inplace
)
def clip_lower(self, threshold, axis=None, inplace=False):
"""
Trim values below a given threshold.
.. deprecated:: 0.24.0
Use clip(lower=threshold) instead.
Elements below the `threshold` will be changed to match the
`threshold` value(s). The threshold can be a single value or an array;
in the latter case the truncation is performed element-wise.
Parameters
----------
threshold : numeric or array-like
Minimum value allowed. All values below threshold will be set to
this value.
* float : every value is compared to `threshold`.
* array-like : The shape of `threshold` should match the object
it's compared to. When `self` is a Series, `threshold` should be
the same length as `self`. When `self` is a DataFrame, `threshold`
should be 2-D and the same shape as `self` for ``axis=None``, or
1-D and the same length as the axis being compared.
axis : {0 or 'index', 1 or 'columns'}, default 0
Align `self` with `threshold` along the given axis.
inplace : bool, default False
Whether to perform the operation in place on the data.
.. versionadded:: 0.21.0
Returns
-------
Series or DataFrame
Original data with values trimmed.
See Also
--------
Series.clip : General purpose method to trim Series values to given
threshold(s).
DataFrame.clip : General purpose method to trim DataFrame values to
given threshold(s).
Examples
--------
Series single threshold clipping:
>>> s = pd.Series([5, 6, 7, 8, 9])
>>> s.clip(lower=8)
0 8
1 8
2 8
3 8
4 9
dtype: int64
Series clipping element-wise using an array of thresholds. `threshold`
should be the same length as the Series.
>>> elemwise_thresholds = [4, 8, 7, 2, 5]
>>> s.clip(lower=elemwise_thresholds)
0 5
1 8
2 7
3 8
4 9
dtype: int64
DataFrames can be compared to a scalar.
>>> df = pd.DataFrame({"A": [1, 3, 5], "B": [2, 4, 6]})
>>> df
A B
0 1 2
1 3 4
2 5 6
>>> df.clip(lower=3)
A B
0 3 3
1 3 4
2 5 6
Or to an array of values. By default, `threshold` should be the same
shape as the DataFrame.
>>> df.clip(lower=np.array([[3, 4], [2, 2], [6, 2]]))
A B
0 3 4
1 3 4
2 6 6
Control how `threshold` is broadcast with `axis`. In this case
`threshold` should be the same length as the axis specified by
`axis`.
>>> df.clip(lower=[3, 3, 5], axis='index')
A B
0 3 3
1 3 4
2 5 6
>>> df.clip(lower=[4, 5], axis='columns')
A B
0 4 5
1 4 5
2 5 6
"""
warnings.warn(
"clip_lower(threshold) is deprecated, use clip(lower=threshold) instead",
FutureWarning,
stacklevel=2,
)
return self._clip_with_one_bound(
threshold, method=self.ge, axis=axis, inplace=inplace
)
def groupby(
self,
by=None,
axis=0,
level=None,
as_index=True,
sort=True,
group_keys=True,
squeeze=False,
observed=False,
**kwargs
):
"""
Group DataFrame or Series using a mapper or by a Series of columns.
A groupby operation involves some combination of splitting the
object, applying a function, and combining the results. This can be
used to group large amounts of data and compute operations on these
groups.
Parameters
----------
by : mapping, function, label, or list of labels
Used to determine the groups for the groupby.
If ``by`` is a function, it's called on each value of the object's
index. If a dict or Series is passed, the Series or dict VALUES
will be used to determine the groups (the Series' values are first
aligned; see ``.align()`` method). If an ndarray is passed, the
values are used as-is to determine the groups. A label or list of
labels may be passed to group by the columns in ``self``. Notice
that a tuple is interpreted as a (single) key.
axis : {0 or 'index', 1 or 'columns'}, default 0
Split along rows (0) or columns (1).
level : int, level name, or sequence of such, default None
If the axis is a MultiIndex (hierarchical), group by a particular
level or levels.
as_index : bool, default True
For aggregated output, return object with group labels as the
index. Only relevant for DataFrame input. as_index=False is
effectively "SQL-style" grouped output.
sort : bool, default True
Sort group keys. Get better performance by turning this off.
Note this does not influence the order of observations within each
group. Groupby preserves the order of rows within each group.
group_keys : bool, default True
When calling apply, add group keys to index to identify pieces.
squeeze : bool, default False
Reduce the dimensionality of the return type if possible,
otherwise return a consistent type.
observed : bool, default False
This only applies if any of the groupers are Categoricals.
If True: only show observed values for categorical groupers.
If False: show all values for categorical groupers.
.. versionadded:: 0.23.0
**kwargs
Optional, only accepts keyword argument 'mutated' and is passed
to groupby.
Returns
-------
DataFrameGroupBy or SeriesGroupBy
Depends on the calling object and returns groupby object that
contains information about the groups.
See Also
--------
resample : Convenience method for frequency conversion and resampling
of time series.
Notes
-----
See the `user guide
<http://pandas.pydata.org/pandas-docs/stable/groupby.html>`_ for more.
Examples
--------
>>> df = pd.DataFrame({'Animal': ['Falcon', 'Falcon',
... 'Parrot', 'Parrot'],
... 'Max Speed': [380., 370., 24., 26.]})
>>> df
Animal Max Speed
0 Falcon 380.0
1 Falcon 370.0
2 Parrot 24.0
3 Parrot 26.0
>>> df.groupby(['Animal']).mean()
Max Speed
Animal
Falcon 375.0
Parrot 25.0
**Hierarchical Indexes**
We can groupby different levels of a hierarchical index
using the `level` parameter:
>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'],
... ['Captive', 'Wild', 'Captive', 'Wild']]
>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type'))
>>> df = pd.DataFrame({'Max Speed': [390., 350., 30., 20.]},
... index=index)
>>> df
Max Speed
Animal Type
Falcon Captive 390.0
Wild 350.0
Parrot Captive 30.0
Wild 20.0
>>> df.groupby(level=0).mean()
Max Speed
Animal
Falcon 370.0
Parrot 25.0
>>> df.groupby(level=1).mean()
Max Speed
Type
Captive 210.0
Wild 185.0
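**Categorical groupers**
When grouping by a ``Categorical``, the ``observed`` keyword controls
whether categories that never occur in the data appear in the result.
A small sketch (the category ``'b'`` is never observed):
>>> cat = pd.Categorical(['a', 'a', 'a'], categories=['a', 'b'])
>>> ser = pd.Series([1, 2, 3])
>>> ser.groupby(cat, observed=True).sum()
a    6
dtype: int64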
"""
from pandas.core.groupby.groupby import groupby
if level is None and by is None:
raise TypeError("You have to supply one of 'by' and 'level'")
axis = self._get_axis_number(axis)
return groupby(
self,
by=by,
axis=axis,
level=level,
as_index=as_index,
sort=sort,
group_keys=group_keys,
squeeze=squeeze,
observed=observed,
**kwargs
)
def asfreq(self, freq, method=None, how=None, normalize=False, fill_value=None):
"""
Convert TimeSeries to specified frequency.
Optionally provide filling method to pad/backfill missing values.
Returns the original data conformed to a new index with the specified
frequency. ``resample`` is more appropriate if an operation, such as
summarization, is necessary to represent the data at the new frequency.
Parameters
----------
freq : DateOffset or str
method : {'backfill'/'bfill', 'pad'/'ffill'}, default None
Method to use for filling holes in reindexed Series (note this
does not fill NaNs that already were present):
* 'pad' / 'ffill': propagate last valid observation forward to next
valid
* 'backfill' / 'bfill': use NEXT valid observation to fill
how : {'start', 'end'}, default end
For PeriodIndex only, see PeriodIndex.asfreq
normalize : bool, default False
Whether to reset output index to midnight
fill_value : scalar, optional
Value to use for missing values, applied during upsampling (note
this does not fill NaNs that already were present).
.. versionadded:: 0.20.0
Returns
-------
converted : same type as caller
See Also
--------
reindex
Notes
-----
To learn more about the frequency strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
Start by creating a series with 4 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=4, freq='T')
>>> series = pd.Series([0.0, None, 2.0, 3.0], index=index)
>>> df = pd.DataFrame({'s':series})
>>> df
s
2000-01-01 00:00:00 0.0
2000-01-01 00:01:00 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:03:00 3.0
Upsample the series into 30 second bins.
>>> df.asfreq(freq='30S')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 NaN
2000-01-01 00:03:00 3.0
Upsample again, providing a ``fill_value``.
>>> df.asfreq(freq='30S', fill_value=9.0)
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 9.0
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 9.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 9.0
2000-01-01 00:03:00 3.0
Upsample again, providing a ``method``.
>>> df.asfreq(freq='30S', method='bfill')
s
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 NaN
2000-01-01 00:01:30 2.0
2000-01-01 00:02:00 2.0
2000-01-01 00:02:30 3.0
2000-01-01 00:03:00 3.0
"""
from pandas.core.resample import asfreq
return asfreq(
self,
freq,
method=method,
how=how,
normalize=normalize,
fill_value=fill_value,
)
def at_time(self, time, asof=False, axis=None):
"""
Select values at particular time of day (e.g. 9:30AM).
Parameters
----------
time : datetime.time or str
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
between_time : Select values between particular times of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_at_time : Get just the index locations for
values at particular time of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='12H')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-09 12:00:00 2
2018-04-10 00:00:00 3
2018-04-10 12:00:00 4
>>> ts.at_time('12:00')
A
2018-04-09 12:00:00 2
2018-04-10 12:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_at_time(time, asof=asof)
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
return self.take(indexer, axis=axis)
def between_time(
self, start_time, end_time, include_start=True, include_end=True, axis=None
):
"""
Select values between particular times of the day (e.g., 9:00-9:30 AM).
By setting ``start_time`` to be later than ``end_time``,
you can get the times that are *not* between the two times.
Parameters
----------
start_time : datetime.time or str
end_time : datetime.time or str
include_start : bool, default True
include_end : bool, default True
axis : {0 or 'index', 1 or 'columns'}, default 0
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
at_time : Select values at a particular time of the day.
first : Select initial periods of time series based on a date offset.
last : Select final periods of time series based on a date offset.
DatetimeIndex.indexer_between_time : Get just the index locations for
values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='1D20min')
>>> ts = pd.DataFrame({'A': [1, 2, 3, 4]}, index=i)
>>> ts
A
2018-04-09 00:00:00 1
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
2018-04-12 01:00:00 4
>>> ts.between_time('0:15', '0:45')
A
2018-04-10 00:20:00 2
2018-04-11 00:40:00 3
You get the times that are *not* between two times by setting
``start_time`` later than ``end_time``:
>>> ts.between_time('0:45', '0:15')
A
2018-04-09 00:00:00 1
2018-04-12 01:00:00 4
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
index = self._get_axis(axis)
try:
indexer = index.indexer_between_time(
start_time,
end_time,
include_start=include_start,
include_end=include_end,
)
except AttributeError:
raise TypeError("Index must be DatetimeIndex")
return self.take(indexer, axis=axis)
def resample(
self,
rule,
how=None,
axis=0,
fill_method=None,
closed=None,
label=None,
convention="start",
kind=None,
loffset=None,
limit=None,
base=0,
on=None,
level=None,
):
"""
Resample time-series data.
Convenience method for frequency conversion and resampling of time
series. Object must have a datetime-like index (`DatetimeIndex`,
`PeriodIndex`, or `TimedeltaIndex`), or pass datetime-like values
to the `on` or `level` keyword.
Parameters
----------
rule : DateOffset, Timedelta or str
The offset string or object representing target conversion.
how : str
Method for down/re-sampling, defaults to 'mean' for downsampling.
.. deprecated:: 0.18.0
The new syntax is ``.resample(...).mean()``, or
``.resample(...).apply(<func>)``
axis : {0 or 'index', 1 or 'columns'}, default 0
Which axis to use for up- or down-sampling. For `Series` this
will default to 0, i.e. along the rows. The index of the chosen
axis must be a `DatetimeIndex`, `TimedeltaIndex` or `PeriodIndex`.
fill_method : str, default None
Filling method for upsampling.
.. deprecated:: 0.18.0
The new syntax is ``.resample(...).<func>()``,
e.g. ``.resample(...).pad()``
closed : {'right', 'left'}, default None
Which side of bin interval is closed. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
label : {'right', 'left'}, default None
Which bin edge label to label bucket with. The default is 'left'
for all frequency offsets except for 'M', 'A', 'Q', 'BM',
'BA', 'BQ', and 'W' which all have a default of 'right'.
convention : {'start', 'end', 's', 'e'}, default 'start'
For `PeriodIndex` only, controls whether to use the start or
end of `rule`.
kind : {'timestamp', 'period'}, optional, default None
Pass 'timestamp' to convert the resulting index to a
`DatetimeIndex` or 'period' to convert it to a `PeriodIndex`.
By default the input representation is retained.
loffset : timedelta, default None
Adjust the resampled time labels.
limit : int, default None
Maximum size gap when reindexing with `fill_method`.
.. deprecated:: 0.18.0
base : int, default 0
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '5min' frequency, base could
range from 0 through 4. Defaults to 0.
on : str, optional
For a DataFrame, column to use instead of index for resampling.
Column must be datetime-like.
level : str or int, optional
For a MultiIndex, level (name or number) to use for
resampling. `level` must be datetime-like.
Returns
-------
Resampler object
See Also
--------
groupby : Group by mapping, function, label, or list of labels.
Series.resample : Resample a Series.
DataFrame.resample: Resample a DataFrame.
Notes
-----
See the `user guide
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#resampling>`_
for more.
To learn more about the offset strings, please see `this link
<http://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects>`__.
Examples
--------
Start by creating a series with 9 one minute timestamps.
>>> index = pd.date_range('1/1/2000', periods=9, freq='T')
>>> series = pd.Series(range(9), index=index)
>>> series
2000-01-01 00:00:00 0
2000-01-01 00:01:00 1
2000-01-01 00:02:00 2
2000-01-01 00:03:00 3
2000-01-01 00:04:00 4
2000-01-01 00:05:00 5
2000-01-01 00:06:00 6
2000-01-01 00:07:00 7
2000-01-01 00:08:00 8
Freq: T, dtype: int64
Downsample the series into 3 minute bins and sum the values
of the timestamps falling into a bin.
>>> series.resample('3T').sum()
2000-01-01 00:00:00 3
2000-01-01 00:03:00 12
2000-01-01 00:06:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but label each
bin using the right edge instead of the left. Please note that the
value in the bucket used as the label is not included in the bucket
it labels. For example, in the original series the
bucket ``2000-01-01 00:03:00`` contains the value 3, but the summed
value in the resampled bucket with the label ``2000-01-01 00:03:00``
does not include 3 (if it did, the summed value would be 6, not 3).
To include this value, close the right side of the bin interval as
illustrated in the example below this one.
>>> series.resample('3T', label='right').sum()
2000-01-01 00:03:00 3
2000-01-01 00:06:00 12
2000-01-01 00:09:00 21
Freq: 3T, dtype: int64
Downsample the series into 3 minute bins as above, but close the right
side of the bin interval.
>>> series.resample('3T', label='right', closed='right').sum()
2000-01-01 00:00:00 0
2000-01-01 00:03:00 6
2000-01-01 00:06:00 15
2000-01-01 00:09:00 15
Freq: 3T, dtype: int64
Upsample the series into 30 second bins.
>>> series.resample('30S').asfreq()[0:5] # Select first 5 rows
2000-01-01 00:00:00 0.0
2000-01-01 00:00:30 NaN
2000-01-01 00:01:00 1.0
2000-01-01 00:01:30 NaN
2000-01-01 00:02:00 2.0
Freq: 30S, dtype: float64
Upsample the series into 30 second bins and fill the ``NaN``
values using the ``pad`` method.
>>> series.resample('30S').pad()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 0
2000-01-01 00:01:00 1
2000-01-01 00:01:30 1
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Upsample the series into 30 second bins and fill the
``NaN`` values using the ``bfill`` method.
>>> series.resample('30S').bfill()[0:5]
2000-01-01 00:00:00 0
2000-01-01 00:00:30 1
2000-01-01 00:01:00 1
2000-01-01 00:01:30 2
2000-01-01 00:02:00 2
Freq: 30S, dtype: int64
Pass a custom function via ``apply``
>>> def custom_resampler(array_like):
... return np.sum(array_like) + 5
...
>>> series.resample('3T').apply(custom_resampler)
2000-01-01 00:00:00 8
2000-01-01 00:03:00 17
2000-01-01 00:06:00 26
Freq: 3T, dtype: int64
For a Series with a PeriodIndex, the keyword `convention` can be
used to control whether to use the start or end of `rule`.
Resample a year by quarter using 'start' `convention`. Values are
assigned to the first quarter of the period.
>>> s = pd.Series([1, 2], index=pd.period_range('2012-01-01',
... freq='A',
... periods=2))
>>> s
2012 1
2013 2
Freq: A-DEC, dtype: int64
>>> s.resample('Q', convention='start').asfreq()
2012Q1 1.0
2012Q2 NaN
2012Q3 NaN
2012Q4 NaN
2013Q1 2.0
2013Q2 NaN
2013Q3 NaN
2013Q4 NaN
Freq: Q-DEC, dtype: float64
Resample quarters by month using 'end' `convention`. Values are
assigned to the last month of the period.
>>> q = pd.Series([1, 2, 3, 4], index=pd.period_range('2018-01-01',
... freq='Q',
... periods=4))
>>> q
2018Q1 1
2018Q2 2
2018Q3 3
2018Q4 4
Freq: Q-DEC, dtype: int64
>>> q.resample('M', convention='end').asfreq()
2018-03 1.0
2018-04 NaN
2018-05 NaN
2018-06 2.0
2018-07 NaN
2018-08 NaN
2018-09 3.0
2018-10 NaN
2018-11 NaN
2018-12 4.0
Freq: M, dtype: float64
For DataFrame objects, the keyword `on` can be used to specify the
column instead of the index for resampling.
>>> d = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df = pd.DataFrame(d)
>>> df['week_starting'] = pd.date_range('01/01/2018',
... periods=8,
... freq='W')
>>> df
price volume week_starting
0 10 50 2018-01-07
1 11 60 2018-01-14
2 9 40 2018-01-21
3 13 100 2018-01-28
4 14 50 2018-02-04
5 18 100 2018-02-11
6 17 40 2018-02-18
7 19 50 2018-02-25
>>> df.resample('M', on='week_starting').mean()
price volume
week_starting
2018-01-31 10.75 62.5
2018-02-28 17.00 60.0
For a DataFrame with MultiIndex, the keyword `level` can be used to
specify on which level the resampling needs to take place.
>>> days = pd.date_range('1/1/2000', periods=4, freq='D')
>>> d2 = dict({'price': [10, 11, 9, 13, 14, 18, 17, 19],
... 'volume': [50, 60, 40, 100, 50, 100, 40, 50]})
>>> df2 = pd.DataFrame(d2,
... index=pd.MultiIndex.from_product([days,
... ['morning',
... 'afternoon']]
... ))
>>> df2
price volume
2000-01-01 morning 10 50
afternoon 11 60
2000-01-02 morning 9 40
afternoon 13 100
2000-01-03 morning 14 50
afternoon 18 100
2000-01-04 morning 17 40
afternoon 19 50
>>> df2.resample('D', level=0).sum()
price volume
2000-01-01 21 110
2000-01-02 22 140
2000-01-03 32 150
2000-01-04 36 90
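For frequencies that evenly subdivide a day, the ``base`` keyword moves
the bin anchors. A small sketch reusing the 9-minute ``series`` from
above; with ``base=2`` the 3-minute edges move from :00/:03/:06 to
:02/:05/:08, so the first bin is anchored just before midnight:
>>> series.resample('3T', base=2).sum()
1999-12-31 23:59:00     1
2000-01-01 00:02:00     9
2000-01-01 00:05:00    18
2000-01-01 00:08:00     8
Freq: 3T, dtype: int64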
"""
from pandas.core.resample import resample, _maybe_process_deprecations
axis = self._get_axis_number(axis)
r = resample(
self,
freq=rule,
label=label,
closed=closed,
axis=axis,
kind=kind,
loffset=loffset,
convention=convention,
base=base,
key=on,
level=level,
)
return _maybe_process_deprecations(
r, how=how, fill_method=fill_method, limit=limit
)
def first(self, offset):
"""
Convenience method for subsetting initial periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
last : Select final periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the first 3 days:
>>> ts.first('3D')
A
2018-04-09 1
2018-04-11 2
Notice that the data for the first 3 calendar days was returned, not the
first 3 days observed in the dataset, and therefore data for 2018-04-13
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'first' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
end_date = end = self.index[0] + offset
# Tick-like, e.g. 3 weeks
if not offset.isAnchored() and hasattr(offset, "_inc"):
if end_date in self.index:
end = self.index.searchsorted(end_date, side="left")
return self.iloc[:end]
return self.loc[:end]
def last(self, offset):
"""
Convenience method for subsetting final periods of time series data
based on a date offset.
Parameters
----------
offset : string, DateOffset, dateutil.relativedelta
Returns
-------
subset : same type as caller
Raises
------
TypeError
If the index is not a :class:`DatetimeIndex`
See Also
--------
first : Select initial periods of time series based on a date offset.
at_time : Select values at a particular time of the day.
between_time : Select values between particular times of the day.
Examples
--------
>>> i = pd.date_range('2018-04-09', periods=4, freq='2D')
>>> ts = pd.DataFrame({'A': [1,2,3,4]}, index=i)
>>> ts
A
2018-04-09 1
2018-04-11 2
2018-04-13 3
2018-04-15 4
Get the rows for the last 3 days:
>>> ts.last('3D')
A
2018-04-13 3
2018-04-15 4
Notice that the data for the last 3 calendar days was returned, not the
last 3 observed days in the dataset, and therefore data for 2018-04-11
was not returned.
"""
if not isinstance(self.index, DatetimeIndex):
raise TypeError("'last' only supports a DatetimeIndex index")
if len(self.index) == 0:
return self
offset = to_offset(offset)
start_date = self.index[-1] - offset
start = self.index.searchsorted(start_date, side="right")
return self.iloc[start:]
def rank(
self,
axis=0,
method="average",
numeric_only=None,
na_option="keep",
ascending=True,
pct=False,
):
"""
Compute numerical data ranks (1 through n) along axis.
By default, equal values are assigned a rank that is the average of the
ranks of those values.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
Index to direct ranking.
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
How to rank the group of records that have the same value
(i.e. ties):
* average: average rank of the group
* min: lowest rank in the group
* max: highest rank in the group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
numeric_only : bool, optional
For DataFrame objects, rank only numeric columns if set to True.
na_option : {'keep', 'top', 'bottom'}, default 'keep'
How to rank NaN values:
* keep: assign NaN rank to NaN values
* top: assign smallest rank to NaN values if ascending
* bottom: assign highest rank to NaN values if ascending
ascending : bool, default True
Whether or not the elements should be ranked in ascending order.
pct : bool, default False
Whether or not to display the returned rankings in percentile
form.
Returns
-------
same type as caller
Return a Series or DataFrame with data ranks as values.
See Also
--------
core.groupby.GroupBy.rank : Rank of values within each group.
Examples
--------
>>> df = pd.DataFrame(data={'Animal': ['cat', 'penguin', 'dog',
... 'spider', 'snake'],
... 'Number_legs': [4, 2, 4, 8, np.nan]})
>>> df
Animal Number_legs
0 cat 4.0
1 penguin 2.0
2 dog 4.0
3 spider 8.0
4 snake NaN
The following example shows how the method behaves with the above
parameters:
* default_rank: this is the default behaviour obtained without using
any parameter.
* max_rank: setting ``method = 'max'`` the records that have the
same values are ranked using the highest rank (e.g.: since 'cat'
and 'dog' are both in the 2nd and 3rd position, rank 3 is assigned.)
* NA_bottom: choosing ``na_option = 'bottom'``, if there are records
with NaN values they are placed at the bottom of the ranking.
* pct_rank: when setting ``pct = True``, the ranking is expressed as
percentile rank.
>>> df['default_rank'] = df['Number_legs'].rank()
>>> df['max_rank'] = df['Number_legs'].rank(method='max')
>>> df['NA_bottom'] = df['Number_legs'].rank(na_option='bottom')
>>> df['pct_rank'] = df['Number_legs'].rank(pct=True)
>>> df
Animal Number_legs default_rank max_rank NA_bottom pct_rank
0 cat 4.0 2.5 3.0 2.5 0.625
1 penguin 2.0 1.0 1.0 1.0 0.250
2 dog 4.0 2.5 3.0 2.5 0.625
3 spider 8.0 4.0 4.0 4.0 1.000
4 snake NaN NaN NaN 5.0 NaN
"""
axis = self._get_axis_number(axis)
if na_option not in {"keep", "top", "bottom"}:
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
raise ValueError(msg)
def ranker(data):
ranks = algos.rank(
data.values,
axis=axis,
method=method,
ascending=ascending,
na_option=na_option,
pct=pct,
)
ranks = self._constructor(ranks, **data._construct_axes_dict())
return ranks.__finalize__(self)
# if numeric_only is None, and we can't get anything, we try with
# numeric_only=True
if numeric_only is None:
try:
return ranker(self)
except TypeError:
numeric_only = True
if numeric_only:
data = self._get_numeric_data()
else:
data = self
return ranker(data)
_shared_docs[
"align"
] = """
Align two objects on their axes with the
specified join method for each axis Index.
Parameters
----------
other : DataFrame or Series
join : {'outer', 'inner', 'left', 'right'}, default 'outer'
axis : allowed axis of the other object, default None
Align on index (0), columns (1), or both (None)
level : int or level name, default None
Broadcast across a level, matching Index values on the
passed MultiIndex level
copy : bool, default True
Always returns new objects. If copy=False and no reindexing is
required then original objects are returned.
fill_value : scalar, default np.NaN
Value to use for missing values. Defaults to NaN, but can be any
"compatible" value
method : {'backfill', 'bfill', 'pad', 'ffill', None}, default None
Method to use for filling holes in reindexed Series
pad / ffill: propagate last valid observation forward to next valid
backfill / bfill: use NEXT valid observation to fill gap
limit : int, default None
If method is specified, this is the maximum number of consecutive
NaN values to forward/backward fill. In other words, if there is
a gap with more than this number of consecutive NaNs, it will only
be partially filled. If method is not specified, this is the
maximum number of entries along the entire axis where NaNs will be
filled. Must be greater than 0 if not None.
fill_axis : %(axes_single_arg)s, default 0
Filling axis, method and limit
broadcast_axis : %(axes_single_arg)s, default None
Broadcast values along this axis, if aligning two objects of
different dimensions
Returns
-------
(left, right) : (%(klass)s, type of other)
Aligned objects.
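Examples
--------
A small sketch of outer alignment on the index; the result is always a
2-tuple, and positions missing from either object are filled with
``fill_value`` (NaN here):
>>> left = pd.Series([1, 2], index=['a', 'b'])
>>> right = pd.Series([3, 4], index=['b', 'c'])
>>> l, r = left.align(right, join='outer')
>>> l
a    1.0
b    2.0
c    NaN
dtype: float64
>>> r
a    NaN
b    3.0
c    4.0
dtype: float64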
"""
@Appender(_shared_docs["align"] % _shared_doc_kwargs)
def align(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
broadcast_axis=None,
):
method = missing.clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, ABCSeries):
# this means other is a DataFrame, and we need to broadcast
# self
cons = self._constructor_expanddim
df = cons(
{c: self for c in other.columns}, **other._construct_axes_dict()
)
return df._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
# this means self is a DataFrame, and we need to broadcast
# other
cons = other._constructor_expanddim
df = cons(
{c: other for c in self.columns}, **self._construct_axes_dict()
)
return self._align_frame(
df,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
if axis is not None:
axis = self._get_axis_number(axis)
if isinstance(other, ABCDataFrame):
return self._align_frame(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
elif isinstance(other, ABCSeries):
return self._align_series(
other,
join=join,
axis=axis,
level=level,
copy=copy,
fill_value=fill_value,
method=method,
limit=limit,
fill_axis=fill_axis,
)
else: # pragma: no cover
raise TypeError("unsupported type: %s" % type(other))
def _align_frame(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
# defaults
join_index, join_columns = None, None
ilidx, iridx = None, None
clidx, cridx = None, None
is_series = isinstance(self, ABCSeries)
if axis is None or axis == 0:
if not self.index.equals(other.index):
join_index, ilidx, iridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if axis is None or axis == 1:
if not is_series and not self.columns.equals(other.columns):
join_columns, clidx, cridx = self.columns.join(
other.columns, how=join, level=level, return_indexers=True
)
if is_series:
reindexers = {0: [join_index, ilidx]}
else:
reindexers = {0: [join_index, ilidx], 1: [join_columns, clidx]}
left = self._reindex_with_indexers(
reindexers, copy=copy, fill_value=fill_value, allow_dups=True
)
# other must be always DataFrame
right = other._reindex_with_indexers(
{0: [join_index, iridx], 1: [join_columns, cridx]},
copy=copy,
fill_value=fill_value,
allow_dups=True,
)
if method is not None:
left = left.fillna(axis=fill_axis, method=method, limit=limit)
right = right.fillna(axis=fill_axis, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _align_series(
self,
other,
join="outer",
axis=None,
level=None,
copy=True,
fill_value=None,
method=None,
limit=None,
fill_axis=0,
):
is_series = isinstance(self, ABCSeries)
# series/series compat, other must always be a Series
if is_series:
if axis:
raise ValueError("cannot align series to a series other than axis 0")
# equal
if self.index.equals(other.index):
join_index, lidx, ridx = None, None, None
else:
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
left = self._reindex_indexer(join_index, lidx, copy)
right = other._reindex_indexer(join_index, ridx, copy)
else:
# one has > 1 ndim
fdata = self._data
if axis == 0:
join_index = self.index
lidx, ridx = None, None
if not self.index.equals(other.index):
join_index, lidx, ridx = self.index.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=1)
elif axis == 1:
join_index = self.columns
lidx, ridx = None, None
if not self.columns.equals(other.index):
join_index, lidx, ridx = self.columns.join(
other.index, how=join, level=level, return_indexers=True
)
if lidx is not None:
fdata = fdata.reindex_indexer(join_index, lidx, axis=0)
else:
raise ValueError("Must specify axis=0 or 1")
if copy and fdata is self._data:
fdata = fdata.copy()
left = self._constructor(fdata)
if ridx is None:
right = other
else:
right = other.reindex(join_index, level=level)
# fill
fill_na = notna(fill_value) or (method is not None)
if fill_na:
left = left.fillna(fill_value, method=method, limit=limit, axis=fill_axis)
right = right.fillna(fill_value, method=method, limit=limit)
# if DatetimeIndex have different tz, convert to UTC
if is_series or (not is_series and axis == 0):
if is_datetime64tz_dtype(left.index):
if left.index.tz != right.index.tz:
if join_index is not None:
left.index = join_index
right.index = join_index
return left.__finalize__(self), right.__finalize__(other)
def _where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
"""
Equivalent to public method `where`, except that `other` is not
applied as a function even if callable. Used in __setitem__.
"""
inplace = validate_bool_kwarg(inplace, "inplace")
# align the cond to same shape as myself
cond = com.apply_if_callable(cond, self)
if isinstance(cond, NDFrame):
cond, _ = cond.align(self, join="right", broadcast_axis=1)
else:
if not hasattr(cond, "shape"):
cond = np.asanyarray(cond)
if cond.shape != self.shape:
raise ValueError("Array conditional must be same shape as self")
cond = self._constructor(cond, **self._construct_axes_dict())
# make sure we are boolean
fill_value = bool(inplace)
cond = cond.fillna(fill_value)
msg = "Boolean array expected for the condition, not {dtype}"
if not isinstance(cond, ABCDataFrame):
# This is a single-dimensional object.
if not is_bool_dtype(cond):
raise ValueError(msg.format(dtype=cond.dtype))
elif not cond.empty:
for dt in cond.dtypes:
if not is_bool_dtype(dt):
raise ValueError(msg.format(dtype=dt))
cond = -cond if inplace else cond
# try to align with other
try_quick = True
if hasattr(other, "align"):
# align with me
if other.ndim <= self.ndim:
_, other = self.align(
other, join="left", axis=axis, level=level, fill_value=np.nan
)
# if we are NOT aligned, raise as we cannot where index
if axis is None and not all(
other._get_axis(i).equals(ax) for i, ax in enumerate(self.axes)
):
raise InvalidIndexError
# slice me out of the other
else:
raise NotImplementedError(
"cannot align with a higher dimensional NDFrame"
)
if isinstance(other, np.ndarray):
if other.shape != self.shape:
if self.ndim == 1:
icond = cond.values
# GH 2745 / GH 4192
# treat like a scalar
if len(other) == 1:
other = np.array(other[0])
# GH 3235
# match True cond to other
elif len(cond[icond]) == len(other):
# try to not change dtype at first (if try_quick)
if try_quick:
try:
new_other = com.values_from_object(self)
new_other = new_other.copy()
new_other[icond] = other
other = new_other
except Exception:
try_quick = False
# let's create a new one (if we failed at the above,
# or not try_quick)
if not try_quick:
dtype, fill_value = maybe_promote(other.dtype)
new_other = np.empty(len(icond), dtype=dtype)
new_other.fill(fill_value)
maybe_upcast_putmask(new_other, icond, other)
other = new_other
else:
raise ValueError(
"Length of replacements must equal series length"
)
else:
raise ValueError(
"other must be the same shape as self when an ndarray"
)
# we are the same shape, so create an actual object for alignment
else:
other = self._constructor(other, **self._construct_axes_dict())
if axis is None:
axis = 0
if self.ndim == getattr(other, "ndim", 0):
align = True
else:
align = self._get_axis_number(axis) == 1
block_axis = self._get_block_manager_axis(axis)
if inplace:
# we may have different type blocks come out of putmask, so
# reconstruct the block manager
self._check_inplace_setting(other)
new_data = self._data.putmask(
mask=cond,
new=other,
align=align,
inplace=True,
axis=block_axis,
transpose=self._AXIS_REVERSED,
)
self._update_inplace(new_data)
else:
new_data = self._data.where(
other=other,
cond=cond,
align=align,
errors=errors,
try_cast=try_cast,
axis=block_axis,
)
return self._constructor(new_data).__finalize__(self)
_shared_docs[
"where"
] = """
Replace values where the condition is %(cond_rev)s.
Parameters
----------
cond : boolean %(klass)s, array-like, or callable
Where `cond` is %(cond)s, keep the original value. Where
%(cond_rev)s, replace with corresponding value from `other`.
If `cond` is callable, it is computed on the %(klass)s and
should return boolean %(klass)s or array. The callable must
not change input %(klass)s (though pandas doesn't check it).
other : scalar, %(klass)s, or callable
Entries where `cond` is %(cond_rev)s are replaced with
corresponding value from `other`.
If other is callable, it is computed on the %(klass)s and
should return scalar or %(klass)s. The callable must not
change input %(klass)s (though pandas doesn't check it).
inplace : bool, default False
Whether to perform the operation in place on the data.
axis : int, default None
Alignment axis if needed.
level : int, default None
Alignment level if needed.
errors : str, {'raise', 'ignore'}, default 'raise'
Note that currently this parameter won't affect
the results and will always coerce to a suitable dtype.
- 'raise' : allow exceptions to be raised.
- 'ignore' : suppress exceptions. On error return original object.
try_cast : bool, default False
Try to cast the result back to the input type (if possible).
Returns
-------
Same type as caller
See Also
--------
:func:`DataFrame.%(name_other)s` : Return an object of same shape as
self.
Notes
-----
The %(name)s method is an application of the if-then idiom. For each
element in the calling DataFrame, if ``cond`` is ``%(cond)s`` the
element is used; otherwise the corresponding element from the DataFrame
``other`` is used.
The signature for :func:`DataFrame.where` differs from
:func:`numpy.where`. Roughly ``df1.where(m, df2)`` is equivalent to
``np.where(m, df1, df2)``.
For further details and examples see the ``%(name)s`` documentation in
:ref:`indexing <indexing.where_mask>`.
Examples
--------
>>> s = pd.Series(range(5))
>>> s.where(s > 0)
0 NaN
1 1.0
2 2.0
3 3.0
4 4.0
dtype: float64
>>> s.mask(s > 0)
0 0.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
>>> s.where(s > 1, 10)
0 10
1 10
2 2
3 3
4 4
dtype: int64
>>> df = pd.DataFrame(np.arange(10).reshape(-1, 2), columns=['A', 'B'])
>>> df
A B
0 0 1
1 2 3
2 4 5
3 6 7
4 8 9
>>> m = df %% 3 == 0
>>> df.where(m, -df)
A B
0 0 -1
1 -2 3
2 -4 -5
3 6 -7
4 -8 9
>>> df.where(m, -df) == np.where(m, df, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
>>> df.where(m, -df) == df.mask(~m, -df)
A B
0 True True
1 True True
2 True True
3 True True
4 True True
"""
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="True",
cond_rev="False",
name="where",
name_other="mask",
)
)
def where(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
other = com.apply_if_callable(other, self)
return self._where(
cond, other, inplace, axis, level, errors=errors, try_cast=try_cast
)
@Appender(
_shared_docs["where"]
% dict(
_shared_doc_kwargs,
cond="False",
cond_rev="True",
name="mask",
name_other="where",
)
)
def mask(
self,
cond,
other=np.nan,
inplace=False,
axis=None,
level=None,
errors="raise",
try_cast=False,
):
inplace = validate_bool_kwarg(inplace, "inplace")
cond = com.apply_if_callable(cond, self)
# see gh-21891
if not hasattr(cond, "__invert__"):
cond = np.array(cond)
return self.where(
~cond,
other=other,
inplace=inplace,
axis=axis,
level=level,
try_cast=try_cast,
errors=errors,
)
_shared_docs[
"shift"
] = """
Shift index by desired number of periods with an optional time `freq`.
When `freq` is not passed, shift the index without realigning the data.
If `freq` is passed (in this case, the index must be date or datetime,
or it will raise a `NotImplementedError`), the index will be
increased using the periods and the `freq`.
Parameters
----------
periods : int
Number of periods to shift. Can be positive or negative.
freq : DateOffset, tseries.offsets, timedelta, or str, optional
Offset to use from the tseries module or time rule (e.g. 'EOM').
If `freq` is specified then the index values are shifted but the
data is not realigned. That is, use `freq` if you would like to
extend the index when shifting and preserve the original data.
axis : {0 or 'index', 1 or 'columns', None}, default None
Shift direction.
fill_value : object, optional
The scalar value to use for newly introduced missing values.
The default depends on the dtype of `self`.
For numeric data, ``np.nan`` is used.
For datetime, timedelta, or period data, etc. :attr:`NaT` is used.
For extension dtypes, ``self.dtype.na_value`` is used.
.. versionchanged:: 0.24.0
Returns
-------
%(klass)s
Copy of input object, shifted.
See Also
--------
Index.shift : Shift values of Index.
DatetimeIndex.shift : Shift values of DatetimeIndex.
PeriodIndex.shift : Shift values of PeriodIndex.
tshift : Shift the time index, using the index's frequency if
available.
Examples
--------
>>> df = pd.DataFrame({'Col1': [10, 20, 15, 30, 45],
... 'Col2': [13, 23, 18, 33, 48],
... 'Col3': [17, 27, 22, 37, 52]})
>>> df.shift(periods=3)
Col1 Col2 Col3
0 NaN NaN NaN
1 NaN NaN NaN
2 NaN NaN NaN
3 10.0 13.0 17.0
4 20.0 23.0 27.0
>>> df.shift(periods=1, axis='columns')
Col1 Col2 Col3
0 NaN 10.0 13.0
1 NaN 20.0 23.0
2 NaN 15.0 18.0
3 NaN 30.0 33.0
4 NaN 45.0 48.0
>>> df.shift(periods=3, fill_value=0)
Col1 Col2 Col3
0 0 0 0
1 0 0 0
2 0 0 0
3 10 13 17
4 20 23 27
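When ``freq`` is passed, only the index is shifted and the data keeps its
original rows. A small sketch with a daily ``DatetimeIndex``:
>>> df_dt = pd.DataFrame({'Col1': [10, 20, 15]},
...                      index=pd.date_range('2020-01-01', periods=3, freq='D'))
>>> df_dt.shift(periods=1, freq='D')
            Col1
2020-01-02    10
2020-01-03    20
2020-01-04    15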
"""
@Appender(_shared_docs["shift"] % _shared_doc_kwargs)
def shift(self, periods=1, freq=None, axis=0, fill_value=None):
if periods == 0:
return self.copy()
block_axis = self._get_block_manager_axis(axis)
if freq is None:
new_data = self._data.shift(
periods=periods, axis=block_axis, fill_value=fill_value
)
else:
return self.tshift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def slice_shift(self, periods=1, axis=0):
"""
Equivalent to `shift` without copying data. The shifted data will
not include the dropped periods and the shifted axis will be smaller
than the original.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
Returns
-------
shifted : same type as caller
Notes
-----
While the `slice_shift` is faster than `shift`, you may pay for it
later during alignment.
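Examples
--------
A small sketch: compared to ``shift``, the leading row that would have
become NaN is dropped instead, so the result is one row shorter.
>>> s = pd.Series([1, 2, 3])
>>> s.slice_shift(periods=1)
1    1
2    2
dtype: int64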
"""
if periods == 0:
return self
if periods > 0:
vslicer = slice(None, -periods)
islicer = slice(periods, None)
else:
vslicer = slice(-periods, None)
islicer = slice(None, periods)
new_obj = self._slice(vslicer, axis=axis)
shifted_axis = self._get_axis(axis)[islicer]
new_obj.set_axis(shifted_axis, axis=axis, inplace=True)
return new_obj.__finalize__(self)
def tshift(self, periods=1, freq=None, axis=0):
"""
Shift the time index, using the index's frequency if available.
Parameters
----------
periods : int
Number of periods to move, can be positive or negative
freq : DateOffset, timedelta, or time rule string, default None
Increment to use from the tseries module or time rule (e.g. 'EOM')
axis : int or str
Corresponds to the axis that contains the Index.
Returns
-------
shifted : Series/DataFrame
Notes
-----
If `freq` is not specified, this method tries to use the `freq` or
`inferred_freq` attributes of the index. If neither of those attributes
exists, a ValueError is raised.
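Examples
--------
A minimal sketch with a daily ``DatetimeIndex`` whose frequency is picked
up automatically; the index moves by one day, the values do not move:
>>> s = pd.Series([1, 2, 3],
...               index=pd.date_range('2020-01-01', periods=3, freq='D'))
>>> s.tshift(periods=1)
2020-01-02    1
2020-01-03    2
2020-01-04    3
Freq: D, dtype: int64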
"""
index = self._get_axis(axis)
if freq is None:
freq = getattr(index, "freq", None)
if freq is None:
freq = getattr(index, "inferred_freq", None)
if freq is None:
msg = "Freq was not given and was not set in the index"
raise ValueError(msg)
if periods == 0:
return self
if isinstance(freq, str):
freq = to_offset(freq)
block_axis = self._get_block_manager_axis(axis)
if isinstance(index, PeriodIndex):
orig_freq = to_offset(index.freq)
if freq == orig_freq:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods)
else:
msg = "Given freq %s does not match PeriodIndex freq %s" % (
freq.rule_code,
orig_freq.rule_code,
)
raise ValueError(msg)
else:
new_data = self._data.copy()
new_data.axes[block_axis] = index.shift(periods, freq)
return self._constructor(new_data).__finalize__(self)
def truncate(self, before=None, after=None, axis=None, copy=True):
"""
Truncate a Series or DataFrame before and after some index value.
This is a useful shorthand for boolean indexing based on index
values above or below certain thresholds.
Parameters
----------
before : date, string, int
Truncate all rows before this index value.
after : date, string, int
Truncate all rows after this index value.
axis : {0 or 'index', 1 or 'columns'}, optional
Axis to truncate. Truncates the index (rows) by default.
copy : bool, default True
Return a copy of the truncated section.
Returns
-------
type of caller
The truncated Series or DataFrame.
See Also
--------
DataFrame.loc : Select a subset of a DataFrame by label.
DataFrame.iloc : Select a subset of a DataFrame by position.
Notes
-----
If the index being truncated contains only datetime values,
`before` and `after` may be specified as strings instead of
Timestamps.
Examples
--------
>>> df = pd.DataFrame({'A': ['a', 'b', 'c', 'd', 'e'],
... 'B': ['f', 'g', 'h', 'i', 'j'],
... 'C': ['k', 'l', 'm', 'n', 'o']},
... index=[1, 2, 3, 4, 5])
>>> df
A B C
1 a f k
2 b g l
3 c h m
4 d i n
5 e j o
>>> df.truncate(before=2, after=4)
A B C
2 b g l
3 c h m
4 d i n
The columns of a DataFrame can be truncated.
>>> df.truncate(before="A", after="B", axis="columns")
A B
1 a f
2 b g
3 c h
4 d i
5 e j
For Series, only rows can be truncated.
>>> df['A'].truncate(before=2, after=4)
2 b
3 c
4 d
Name: A, dtype: object
The index values in ``truncate`` can be datetimes or string
dates.
>>> dates = pd.date_range('2016-01-01', '2016-02-01', freq='s')
>>> df = pd.DataFrame(index=dates, data={'A': 1})
>>> df.tail()
A
2016-01-31 23:59:56 1
2016-01-31 23:59:57 1
2016-01-31 23:59:58 1
2016-01-31 23:59:59 1
2016-02-01 00:00:00 1
>>> df.truncate(before=pd.Timestamp('2016-01-05'),
... after=pd.Timestamp('2016-01-10')).tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Because the index is a DatetimeIndex containing only dates, we can
specify `before` and `after` as strings. They will be coerced to
Timestamps before truncation.
>>> df.truncate('2016-01-05', '2016-01-10').tail()
A
2016-01-09 23:59:56 1
2016-01-09 23:59:57 1
2016-01-09 23:59:58 1
2016-01-09 23:59:59 1
2016-01-10 00:00:00 1
Note that ``truncate`` assumes a 0 value for any unspecified time
component (midnight). This differs from partial string slicing, which
returns any partially matching dates.
>>> df.loc['2016-01-05':'2016-01-10', :].tail()
A
2016-01-10 23:59:55 1
2016-01-10 23:59:56 1
2016-01-10 23:59:57 1
2016-01-10 23:59:58 1
2016-01-10 23:59:59 1
"""
if axis is None:
axis = self._stat_axis_number
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
# GH 17935
# Check that index is sorted
if not ax.is_monotonic_increasing and not ax.is_monotonic_decreasing:
raise ValueError("truncate requires a sorted index")
# if we have a date index, convert to dates, otherwise
# treat like a slice
if ax.is_all_dates:
from pandas.core.tools.datetimes import to_datetime
before = to_datetime(before)
after = to_datetime(after)
if before is not None and after is not None:
if before > after:
raise ValueError("Truncate: %s must be after %s" % (after, before))
slicer = [slice(None, None)] * self._AXIS_LEN
slicer[axis] = slice(before, after)
result = self.loc[tuple(slicer)]
if isinstance(ax, MultiIndex):
setattr(result, self._get_axis_name(axis), ax.truncate(before, after))
if copy:
result = result.copy()
return result
def tz_convert(self, tz, axis=0, level=None, copy=True):
"""
Convert tz-aware axis to target time zone.
Parameters
----------
tz : str or tzinfo object
axis : the axis to convert
level : int, str, default None
If axis is a MultiIndex, convert a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
Returns
-------
%(klass)s
Object with time zone converted axis.
Raises
------
TypeError
If the axis is tz-naive.
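Examples
--------
A minimal sketch: localize a naive index to CET and convert it to UTC
(01:30 CEST corresponds to 23:30 UTC on the previous day):
>>> s = pd.Series([1],
...               index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET').tz_convert('UTC')
2018-09-14 23:30:00+00:00    1
dtype: int64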
"""
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_convert(ax, tz):
if not hasattr(ax, "tz_convert"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
"%s is not a valid DatetimeIndex or PeriodIndex" % ax_name
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_convert(tz)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_convert(ax.levels[level], tz)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_convert(ax, tz)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
def tz_localize(
self, tz, axis=0, level=None, copy=True, ambiguous="raise", nonexistent="raise"
):
"""
Localize tz-naive index of a Series or DataFrame to target time zone.
This operation localizes the Index. To localize the values in a
timezone-naive Series, use :meth:`Series.dt.tz_localize`.
Parameters
----------
tz : str or tzinfo
axis : the axis to localize
level : int, str, default None
If axis is a MultiIndex, localize a specific level. Otherwise
must be None.
copy : bool, default True
Also make a copy of the underlying data.
ambiguous : 'infer', bool-ndarray, 'NaT', default 'raise'
When clocks moved backward due to DST, ambiguous times may arise.
For example in Central European Time (UTC+01), when going from
03:00 DST to 02:00 non-DST, 02:30:00 local time occurs both at
00:30:00 UTC and at 01:30:00 UTC. In such a situation, the
`ambiguous` parameter dictates how ambiguous times should be
handled.
- 'infer' will attempt to infer fall dst-transition hours based on
order
- bool-ndarray where True signifies a DST time, False designates
a non-DST time (note that this flag is only applicable for
ambiguous times)
- 'NaT' will return NaT where there are ambiguous times
- 'raise' will raise an AmbiguousTimeError if there are ambiguous
times
nonexistent : str, default 'raise'
A nonexistent time does not exist in a particular timezone
where clocks moved forward due to DST. Valid values are:
- 'shift_forward' will shift the nonexistent time forward to the
closest existing time
- 'shift_backward' will shift the nonexistent time backward to the
closest existing time
- 'NaT' will return NaT where there are nonexistent times
- timedelta objects will shift nonexistent times by the timedelta
- 'raise' will raise a NonExistentTimeError if there are
nonexistent times
.. versionadded:: 0.24.0
Returns
-------
Series or DataFrame
Same type as the input.
Raises
------
TypeError
If the TimeSeries is tz-aware and tz is not None.
Examples
--------
Localize local times:
>>> s = pd.Series([1],
... index=pd.DatetimeIndex(['2018-09-15 01:30:00']))
>>> s.tz_localize('CET')
2018-09-15 01:30:00+02:00 1
dtype: int64
Be careful with DST changes. When there is sequential data, pandas
can infer the DST time:
>>> s = pd.Series(range(7), index=pd.DatetimeIndex([
... '2018-10-28 01:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 02:00:00',
... '2018-10-28 02:30:00',
... '2018-10-28 03:00:00',
... '2018-10-28 03:30:00']))
>>> s.tz_localize('CET', ambiguous='infer')
2018-10-28 01:30:00+02:00 0
2018-10-28 02:00:00+02:00 1
2018-10-28 02:30:00+02:00 2
2018-10-28 02:00:00+01:00 3
2018-10-28 02:30:00+01:00 4
2018-10-28 03:00:00+01:00 5
2018-10-28 03:30:00+01:00 6
dtype: int64
In some cases, inferring the DST is impossible. In such cases, you can
pass an ndarray to the ambiguous parameter to set the DST explicitly
>>> s = pd.Series(range(3), index=pd.DatetimeIndex([
... '2018-10-28 01:20:00',
... '2018-10-28 02:36:00',
... '2018-10-28 03:46:00']))
>>> s.tz_localize('CET', ambiguous=np.array([True, True, False]))
2018-10-28 01:20:00+02:00 0
2018-10-28 02:36:00+02:00 1
2018-10-28 03:46:00+01:00 2
dtype: int64
If the DST transition causes nonexistent times, you can shift these
dates forward or backwards with a timedelta object or `'shift_forward'`
or `'shift_backward'`.
>>> s = pd.Series(range(2), index=pd.DatetimeIndex([
... '2015-03-29 02:30:00',
... '2015-03-29 03:30:00']))
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
2015-03-29 03:00:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
2015-03-29 01:59:59.999999999+01:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
>>> s.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta('1H'))
2015-03-29 03:30:00+02:00 0
2015-03-29 03:30:00+02:00 1
dtype: int64
"""
nonexistent_options = ("raise", "NaT", "shift_forward", "shift_backward")
if nonexistent not in nonexistent_options and not isinstance(
nonexistent, timedelta
):
raise ValueError(
"The nonexistent argument must be one of 'raise',"
" 'NaT', 'shift_forward', 'shift_backward' or"
" a timedelta object"
)
axis = self._get_axis_number(axis)
ax = self._get_axis(axis)
def _tz_localize(ax, tz, ambiguous, nonexistent):
if not hasattr(ax, "tz_localize"):
if len(ax) > 0:
ax_name = self._get_axis_name(axis)
raise TypeError(
"%s is not a valid DatetimeIndex or PeriodIndex" % ax_name
)
else:
ax = DatetimeIndex([], tz=tz)
else:
ax = ax.tz_localize(tz, ambiguous=ambiguous, nonexistent=nonexistent)
return ax
# if a level is given it must be a MultiIndex level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
new_level = _tz_localize(ax.levels[level], tz, ambiguous, nonexistent)
ax = ax.set_levels(new_level, level=level)
else:
if level not in (None, 0, ax.name):
raise ValueError("The level {0} is not valid".format(level))
ax = _tz_localize(ax, tz, ambiguous, nonexistent)
result = self._constructor(self._data, copy=copy)
result = result.set_axis(ax, axis=axis, inplace=False)
return result.__finalize__(self)
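# Illustrative sketch for tz_localize: passing ``tz=None`` removes the time
# zone while keeping the local (wall) times, assuming a tz-aware index:
# >>> s = pd.Series([1], index=pd.DatetimeIndex(['2018-09-15 01:30:00'], tz='CET'))
# >>> s.tz_localize(None)
# 2018-09-15 01:30:00    1
# dtype: int64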
# ----------------------------------------------------------------------
# Numeric Methods
def abs(self):
"""
Return a Series/DataFrame with absolute numeric value of each element.
This function only applies to elements that are all numeric.
Returns
-------
abs
Series/DataFrame containing the absolute value of each element.
See Also
--------
numpy.absolute : Calculate the absolute value element-wise.
Notes
-----
For ``complex`` inputs, ``1.2 + 1j``, the absolute value is
:math:`\\sqrt{ a^2 + b^2 }`.
Examples
--------
Absolute numeric values in a Series.
>>> s = pd.Series([-1.10, 2, -3.33, 4])
>>> s.abs()
0 1.10
1 2.00
2 3.33
3 4.00
dtype: float64
Absolute numeric values in a Series with complex numbers.
>>> s = pd.Series([1.2 + 1j])
>>> s.abs()
0 1.56205
dtype: float64
Absolute numeric values in a Series with a Timedelta element.
>>> s = pd.Series([pd.Timedelta('1 days')])
>>> s.abs()
0 1 days
dtype: timedelta64[ns]
Select rows with data closest to certain value using argsort (from
`StackOverflow <https://stackoverflow.com/a/17758115>`__).
>>> df = pd.DataFrame({
... 'a': [4, 5, 6, 7],
... 'b': [10, 20, 30, 40],
... 'c': [100, 50, -30, -50]
... })
>>> df
a b c
0 4 10 100
1 5 20 50
2 6 30 -30
3 7 40 -50
>>> df.loc[(df.c - 43).abs().argsort()]
a b c
1 5 20 50
0 4 10 100
2 6 30 -30
3 7 40 -50
"""
return np.abs(self)
def describe(self, percentiles=None, include=None, exclude=None):
"""
Generate descriptive statistics that summarize the central tendency,
dispersion and shape of a dataset's distribution, excluding
``NaN`` values.
Analyzes both numeric and object series, as well
as ``DataFrame`` column sets of mixed data types. The output
will vary depending on what is provided. Refer to the notes
below for more detail.
Parameters
----------
percentiles : list-like of numbers, optional
The percentiles to include in the output. All should
fall between 0 and 1. The default is
``[.25, .5, .75]``, which returns the 25th, 50th, and
75th percentiles.
include : 'all', list-like of dtypes or None (default), optional
A white list of data types to include in the result. Ignored
for ``Series``. Here are the options:
- 'all' : All columns of the input will be included in the output.
- A list-like of dtypes : Limits the results to the
provided data types.
To limit the result to numeric types submit
``numpy.number``. To limit it instead to object columns submit
the ``numpy.object`` data type. Strings
can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
select pandas categorical columns, use ``'category'``
- None (default) : The result will include all numeric columns.
exclude : list-like of dtypes or None (default), optional
A black list of data types to omit from the result. Ignored
for ``Series``. Here are the options:
- A list-like of dtypes : Excludes the provided data types
from the result. To exclude numeric types submit
``numpy.number``. To exclude object columns submit the data
type ``numpy.object``. Strings can also be used in the style of
``select_dtypes`` (e.g. ``df.describe(include=['O'])``). To
exclude pandas categorical columns, use ``'category'``
- None (default) : The result will exclude nothing.
Returns
-------
Series or DataFrame
Summary statistics of the Series or Dataframe provided.
See Also
--------
DataFrame.count: Count number of non-NA/null observations.
DataFrame.max: Maximum of the values in the object.
DataFrame.min: Minimum of the values in the object.
DataFrame.mean: Mean of the values.
DataFrame.std: Standard deviation of the observations.
DataFrame.select_dtypes: Subset of a DataFrame including/excluding
columns based on their dtype.
Notes
-----
For numeric data, the result's index will include ``count``,
``mean``, ``std``, ``min``, ``max`` as well as lower, ``50`` and
upper percentiles. By default the lower percentile is ``25`` and the
upper percentile is ``75``. The ``50`` percentile is the
same as the median.
For object data (e.g. strings or timestamps), the result's index
will include ``count``, ``unique``, ``top``, and ``freq``. The ``top``
is the most common value. The ``freq`` is the most common value's
frequency. Timestamps also include the ``first`` and ``last`` items.
If multiple object values have the highest count, then the
``count`` and ``top`` results will be arbitrarily chosen from
among those with the highest count.
For mixed data types provided via a ``DataFrame``, the default is to
return only an analysis of numeric columns. If the dataframe consists
only of object and categorical data without any numeric columns, the
default is to return an analysis of both the object and categorical
columns. If ``include='all'`` is provided as an option, the result
will include a union of attributes of each type.
The `include` and `exclude` parameters can be used to limit
which columns in a ``DataFrame`` are analyzed for the output.
The parameters are ignored when analyzing a ``Series``.
Examples
--------
Describing a numeric ``Series``.
>>> s = pd.Series([1, 2, 3])
>>> s.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
dtype: float64
Describing a categorical ``Series``.
>>> s = pd.Series(['a', 'a', 'b', 'c'])
>>> s.describe()
count 4
unique 3
top a
freq 2
dtype: object
Describing a timestamp ``Series``.
>>> s = pd.Series([
... np.datetime64("2000-01-01"),
... np.datetime64("2010-01-01"),
... np.datetime64("2010-01-01")
... ])
>>> s.describe()
count 3
unique 2
top 2010-01-01 00:00:00
freq 2
first 2000-01-01 00:00:00
last 2010-01-01 00:00:00
dtype: object
Describing a ``DataFrame``. By default only numeric fields
are returned.
>>> df = pd.DataFrame({'categorical': pd.Categorical(['d','e','f']),
... 'numeric': [1, 2, 3],
... 'object': ['a', 'b', 'c']
... })
>>> df.describe()
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Describing all columns of a ``DataFrame`` regardless of data type.
>>> df.describe(include='all')
categorical numeric object
count 3 3.0 3
unique 3 NaN 3
top f NaN c
freq 1 NaN 1
mean NaN 2.0 NaN
std NaN 1.0 NaN
min NaN 1.0 NaN
25% NaN 1.5 NaN
50% NaN 2.0 NaN
75% NaN 2.5 NaN
max NaN 3.0 NaN
Describing a column from a ``DataFrame`` by accessing it as
an attribute.
>>> df.numeric.describe()
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Name: numeric, dtype: float64
Including only numeric columns in a ``DataFrame`` description.
>>> df.describe(include=[np.number])
numeric
count 3.0
mean 2.0
std 1.0
min 1.0
25% 1.5
50% 2.0
75% 2.5
max 3.0
Including only string columns in a ``DataFrame`` description.
>>> df.describe(include=[np.object])
object
count 3
unique 3
top c
freq 1
Including only categorical columns from a ``DataFrame`` description.
>>> df.describe(include=['category'])
categorical
count 3
unique 3
top f
freq 1
Excluding numeric columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.number])
categorical object
count 3 3
unique 3 3
top f c
freq 1 1
Excluding object columns from a ``DataFrame`` description.
>>> df.describe(exclude=[np.object])
categorical numeric
count 3 3.0
unique 3 NaN
top f NaN
freq 1 NaN
mean NaN 2.0
std NaN 1.0
min NaN 1.0
25% NaN 1.5
50% NaN 2.0
75% NaN 2.5
max NaN 3.0
"""
if self.ndim == 2 and self.columns.size == 0:
raise ValueError("Cannot describe a DataFrame without columns")
if percentiles is not None:
# explicit conversion of `percentiles` to list
percentiles = list(percentiles)
# get them all to be in [0, 1]
self._check_percentile(percentiles)
# median should always be included
if 0.5 not in percentiles:
percentiles.append(0.5)
percentiles = np.asarray(percentiles)
else:
percentiles = np.array([0.25, 0.5, 0.75])
# sort and check for duplicates
unique_pcts = np.unique(percentiles)
if len(unique_pcts) < len(percentiles):
raise ValueError("percentiles cannot contain duplicates")
percentiles = unique_pcts
formatted_percentiles = format_percentiles(percentiles)
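# Illustrative note: for the default percentiles this evaluates to
# ['25%', '50%', '75%'], which become row labels in the result below.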
def describe_numeric_1d(series):
stat_index = (
["count", "mean", "std", "min"] + formatted_percentiles + ["max"]
)
d = (
[series.count(), series.mean(), series.std(), series.min()]
+ series.quantile(percentiles).tolist()
+ [series.max()]
)
return pd.Series(d, index=stat_index, name=series.name)
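# Illustrative note: with the default percentiles this helper produces the
# count/mean/std/min/25%/50%/75%/max block shown in the ``s.describe()``
# docstring example above.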
def describe_categorical_1d(data):
names = ["count", "unique"]
objcounts = data.value_counts()
count_unique = len(objcounts[objcounts != 0])
result = [data.count(), count_unique]
dtype = None
if result[1] > 0:
top, freq = objcounts.index[0], objcounts.iloc[0]
if is_datetime64_any_dtype(data):
tz = data.dt.tz
asint = data.dropna().values.view("i8")
top = Timestamp(top)
if top.tzinfo is not None and tz is not None:
# Don't tz_localize(None) if key is already tz-aware
top = top.tz_convert(tz)
else:
top = top.tz_localize(tz)
names += ["top", "freq", "first", "last"]
result += [
top,
freq,
Timestamp(asint.min(), tz=tz),
Timestamp(asint.max(), tz=tz),
]
else:
names += ["top", "freq"]
result += [top, freq]
# If the DataFrame is empty, set 'top' and 'freq' to None
# to maintain output shape consistency
else:
names += ["top", "freq"]
result += [np.nan, np.nan]
dtype = "object"
return pd.Series(result, index=names, name=data.name, dtype=dtype)
def describe_1d(data):
if is_bool_dtype(data):
return describe_categorical_1d(data)
elif is_numeric_dtype(data):
return describe_numeric_1d(data)
elif is_timedelta64_dtype(data):
return describe_numeric_1d(data)
else:
return describe_categorical_1d(data)
if self.ndim == 1:
return describe_1d(self)
elif (include is None) and (exclude is None):
# when some numerics are found, keep only numerics
data = self.select_dtypes(include=[np.number])
if len(data.columns) == 0:
data = self
elif include == "all":
if exclude is not None:
msg = "exclude must be None when include is 'all'"
raise ValueError(msg)
data = self
else:
data = self.select_dtypes(include=include, exclude=exclude)
ldesc = [describe_1d(s) for _, s in data.items()]
# set a convenient order for rows
names = []
ldesc_indexes = sorted((x.index for x in ldesc), key=len)
for idxnames in ldesc_indexes:
for name in idxnames:
if name not in names:
names.append(name)
d = pd.concat([x.reindex(names, copy=False) for x in ldesc], axis=1, sort=False)
d.columns = data.columns.copy()
return d
def _check_percentile(self, q):
"""
Validate percentiles (used by describe and quantile).
"""
msg = "percentiles should all be in the interval [0, 1]. Try {0} instead."
q = np.asarray(q)
if q.ndim == 0:
if not 0 <= q <= 1:
raise ValueError(msg.format(q / 100.0))
else:
if not all(0 <= qs <= 1 for qs in q):
raise ValueError(msg.format(q / 100.0))
return q
_shared_docs[
"pct_change"
] = """
Percentage change between the current and a prior element.
Computes the percentage change from the immediately previous row by
default. This is useful in comparing the percentage of change in a time
series of elements.
Parameters
----------
periods : int, default 1
Periods to shift for forming percent change.
fill_method : str, default 'pad'
How to handle NAs before computing percent changes.
limit : int, default None
The number of consecutive NAs to fill before stopping.
freq : DateOffset, timedelta, or offset alias string, optional
Increment to use from time series API (e.g. 'M' or BDay()).
**kwargs
Additional keyword arguments are passed into
`DataFrame.shift` or `Series.shift`.
Returns
-------
chg : Series or DataFrame
The same type as the calling object.
See Also
--------
Series.diff : Compute the difference of two elements in a Series.
DataFrame.diff : Compute the difference of two elements in a DataFrame.
Series.shift : Shift the index by some number of periods.
DataFrame.shift : Shift the index by some number of periods.
Examples
--------
**Series**
>>> s = pd.Series([90, 91, 85])
>>> s
0 90
1 91
2 85
dtype: int64
>>> s.pct_change()
0 NaN
1 0.011111
2 -0.065934
dtype: float64
>>> s.pct_change(periods=2)
0 NaN
1 NaN
2 -0.055556
dtype: float64
See the percentage change in a Series where filling NAs with last
valid observation forward to next valid.
>>> s = pd.Series([90, 91, None, 85])
>>> s
0 90.0
1 91.0
2 NaN
3 85.0
dtype: float64
>>> s.pct_change(fill_method='ffill')
0 NaN
1 0.011111
2 0.000000
3 -0.065934
dtype: float64
**DataFrame**
Percentage change in French franc, Deutsche Mark, and Italian lira from
1980-01-01 to 1980-03-01.
>>> df = pd.DataFrame({
... 'FR': [4.0405, 4.0963, 4.3149],
... 'GR': [1.7246, 1.7482, 1.8519],
... 'IT': [804.74, 810.01, 860.13]},
... index=['1980-01-01', '1980-02-01', '1980-03-01'])
>>> df
FR GR IT
1980-01-01 4.0405 1.7246 804.74
1980-02-01 4.0963 1.7482 810.01
1980-03-01 4.3149 1.8519 860.13
>>> df.pct_change()
FR GR IT
1980-01-01 NaN NaN NaN
1980-02-01 0.013810 0.013684 0.006549
1980-03-01 0.053365 0.059318 0.061876
Percentage of change in GOOG and APPL stock volume. Shows computing
the percentage change between columns.
>>> df = pd.DataFrame({
... '2016': [1769950, 30586265],
... '2015': [1500923, 40912316],
... '2014': [1371819, 41403351]},
... index=['GOOG', 'APPL'])
>>> df
2016 2015 2014
GOOG 1769950 1500923 1371819
APPL 30586265 40912316 41403351
>>> df.pct_change(axis='columns')
2016 2015 2014
GOOG NaN -0.151997 -0.086016
APPL NaN 0.337604 0.012002
"""
@Appender(_shared_docs["pct_change"] % _shared_doc_kwargs)
def pct_change(self, periods=1, fill_method="pad", limit=None, freq=None, **kwargs):
# TODO: Not sure if above is correct - need someone to confirm.
axis = self._get_axis_number(kwargs.pop("axis", self._stat_axis_name))
if fill_method is None:
data = self
else:
data = self.fillna(method=fill_method, limit=limit, axis=axis)
rs = data.div(data.shift(periods=periods, freq=freq, axis=axis, **kwargs)) - 1
rs = rs.reindex_like(data)
if freq is None:
mask = isna(com.values_from_object(data))
np.putmask(rs.values, mask, np.nan)
return rs
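# Worked sketch of the core computation above (illustrative, no NA filling):
# >>> s = pd.Series([90, 91, 85])
# >>> s / s.shift(1) - 1
# 0         NaN
# 1    0.011111
# 2   -0.065934
# dtype: float64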
def _agg_by_level(self, name, axis=0, level=0, skipna=True, **kwargs):
if axis is None:
raise ValueError("Must specify 'axis' when aggregating by level.")
grouped = self.groupby(level=level, axis=axis, sort=False)
if hasattr(grouped, name) and skipna:
return getattr(grouped, name)(**kwargs)
axis = self._get_axis_number(axis)
method = getattr(type(self), name)
applyf = lambda x: method(x, axis=axis, skipna=skipna, **kwargs)
return grouped.aggregate(applyf)
@classmethod
def _add_numeric_operations(cls):
"""
Add the operations to the cls; evaluate the doc strings again
"""
axis_descr, name, name2 = _doc_parms(cls)
cls.any = _make_logical_function(
cls,
"any",
name,
name2,
axis_descr,
_any_desc,
nanops.nanany,
_any_see_also,
_any_examples,
empty_value=False,
)
cls.all = _make_logical_function(
cls,
"all",
name,
name2,
axis_descr,
_all_desc,
nanops.nanall,
_all_see_also,
_all_examples,
empty_value=True,
)
@Substitution(
desc="Return the mean absolute deviation of the values "
"for the requested axis.",
name1=name,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc)
def mad(self, axis=None, skipna=None, level=None):
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level("mad", axis=axis, level=level, skipna=skipna)
data = self._get_numeric_data()
if axis == 0:
demeaned = data - data.mean(axis=0)
else:
demeaned = data.sub(data.mean(axis=1), axis=0)
return np.abs(demeaned).mean(axis=axis, skipna=skipna)
cls.mad = mad
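# Worked sketch for mad above (illustrative):
# >>> pd.Series([1, 2, 3, 4]).mad()  # mean(|x - 2.5|) = (1.5 + 0.5 + 0.5 + 1.5) / 4
# 1.0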
cls.sem = _make_stat_function_ddof(
cls,
"sem",
name,
name2,
axis_descr,
"Return unbiased standard error of the mean over requested "
"axis.\n\nNormalized by N-1 by default. This can be changed "
"using the ddof argument",
nanops.nansem,
)
cls.var = _make_stat_function_ddof(
cls,
"var",
name,
name2,
axis_descr,
"Return unbiased variance over requested axis.\n\nNormalized by "
"N-1 by default. This can be changed using the ddof argument",
nanops.nanvar,
)
cls.std = _make_stat_function_ddof(
cls,
"std",
name,
name2,
axis_descr,
"Return sample standard deviation over requested axis."
"\n\nNormalized by N-1 by default. This can be changed using the "
"ddof argument",
nanops.nanstd,
)
@Substitution(
desc="Return the compound percentage of the values for "
"the requested axis.\n\n.. deprecated:: 0.25.0",
name1=name,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also="",
examples="",
)
@Appender(_num_doc)
def compound(self, axis=None, skipna=None, level=None):
msg = (
"The 'compound' method is deprecated and will be "
"removed in a future version."
)
warnings.warn(msg, FutureWarning, stacklevel=2)
if skipna is None:
skipna = True
return (1 + self).prod(axis=axis, skipna=skipna, level=level) - 1
cls.compound = compound
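# Illustrative note: compound computes (1 + r).prod() - 1, so period returns
# of 10% and 20% compound to (1.1 * 1.2) - 1, i.e. 32%.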
cls.cummin = _make_cum_function(
cls,
"cummin",
name,
name2,
axis_descr,
"minimum",
lambda y, axis: np.minimum.accumulate(y, axis),
"min",
np.inf,
np.nan,
_cummin_examples,
)
cls.cumsum = _make_cum_function(
cls,
"cumsum",
name,
name2,
axis_descr,
"sum",
lambda y, axis: y.cumsum(axis),
"sum",
0.0,
np.nan,
_cumsum_examples,
)
cls.cumprod = _make_cum_function(
cls,
"cumprod",
name,
name2,
axis_descr,
"product",
lambda y, axis: y.cumprod(axis),
"prod",
1.0,
np.nan,
_cumprod_examples,
)
cls.cummax = _make_cum_function(
cls,
"cummax",
name,
name2,
axis_descr,
"maximum",
lambda y, axis: np.maximum.accumulate(y, axis),
"max",
-np.inf,
np.nan,
_cummax_examples,
)
cls.sum = _make_min_count_stat_function(
cls,
"sum",
name,
name2,
axis_descr,
"""Return the sum of the values for the requested axis.\n
This is equivalent to the method ``numpy.sum``.""",
nanops.nansum,
_stat_func_see_also,
_sum_examples,
)
cls.mean = _make_stat_function(
cls,
"mean",
name,
name2,
axis_descr,
"Return the mean of the values for the requested axis.",
nanops.nanmean,
)
cls.skew = _make_stat_function(
cls,
"skew",
name,
name2,
axis_descr,
"Return unbiased skew over requested axis\nNormalized by N-1.",
nanops.nanskew,
)
cls.kurt = _make_stat_function(
cls,
"kurt",
name,
name2,
axis_descr,
"Return unbiased kurtosis over requested axis using Fisher's "
"definition of\nkurtosis (kurtosis of normal == 0.0). Normalized "
"by N-1.",
nanops.nankurt,
)
cls.kurtosis = cls.kurt
cls.prod = _make_min_count_stat_function(
cls,
"prod",
name,
name2,
axis_descr,
"Return the product of the values for the requested axis.",
nanops.nanprod,
examples=_prod_examples,
)
cls.product = cls.prod
cls.median = _make_stat_function(
cls,
"median",
name,
name2,
axis_descr,
"Return the median of the values for the requested axis.",
nanops.nanmedian,
)
cls.max = _make_stat_function(
cls,
"max",
name,
name2,
axis_descr,
"""Return the maximum of the values for the requested axis.\n
If you want the *index* of the maximum, use ``idxmax``. This is
the equivalent of the ``numpy.ndarray`` method ``argmax``.""",
nanops.nanmax,
_stat_func_see_also,
_max_examples,
)
cls.min = _make_stat_function(
cls,
"min",
name,
name2,
axis_descr,
"""Return the minimum of the values for the requested axis.\n
If you want the *index* of the minimum, use ``idxmin``. This is
the equivalent of the ``numpy.ndarray`` method ``argmin``.""",
nanops.nanmin,
_stat_func_see_also,
_min_examples,
)
@classmethod
def _add_series_only_operations(cls):
"""
Add the series only operations to the cls; evaluate the doc
strings again.
"""
axis_descr, name, name2 = _doc_parms(cls)
def nanptp(values, axis=0, skipna=True):
nmax = nanops.nanmax(values, axis, skipna)
nmin = nanops.nanmin(values, axis, skipna)
warnings.warn(
"Method .ptp is deprecated and will be removed "
"in a future version. Use numpy.ptp instead.",
FutureWarning,
stacklevel=4,
)
return nmax - nmin
cls.ptp = _make_stat_function(
cls,
"ptp",
name,
name2,
axis_descr,
"""Return the difference between the maximum value and the
minimum value in the object. This is the equivalent of the
``numpy.ndarray`` method ``ptp``.\n\n.. deprecated:: 0.24.0
Use numpy.ptp instead""",
nanptp,
)
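# Worked sketch for the deprecated ptp above (illustrative; also emits a
# FutureWarning):
# >>> pd.Series([3, 1, 7]).ptp()  # equivalent to s.max() - s.min()
# 6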
@classmethod
def _add_series_or_dataframe_operations(cls):
"""
Add the series or dataframe only operations to the cls; evaluate
the doc strings again.
"""
from pandas.core.window import EWM, Expanding, Rolling, Window
@Appender(Rolling.__doc__)
def rolling(
self,
window,
min_periods=None,
center=False,
win_type=None,
on=None,
axis=0,
closed=None,
):
axis = self._get_axis_number(axis)
if win_type is not None:
return Window(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
return Rolling(
self,
window=window,
min_periods=min_periods,
center=center,
win_type=win_type,
on=on,
axis=axis,
closed=closed,
)
cls.rolling = rolling
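# Illustrative usage sketch for the rolling wrapper above:
# >>> pd.Series([1, 2, 3, 4]).rolling(window=2).sum()
# 0    NaN
# 1    3.0
# 2    5.0
# 3    7.0
# dtype: float64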
@Appender(Expanding.__doc__)
def expanding(self, min_periods=1, center=False, axis=0):
axis = self._get_axis_number(axis)
return Expanding(self, min_periods=min_periods, center=center, axis=axis)
cls.expanding = expanding
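# Illustrative usage sketch for the expanding wrapper above:
# >>> pd.Series([1, 2, 3]).expanding().sum()
# 0    1.0
# 1    3.0
# 2    6.0
# dtype: float64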
@Appender(EWM.__doc__)
def ewm(
self,
com=None,
span=None,
halflife=None,
alpha=None,
min_periods=0,
adjust=True,
ignore_na=False,
axis=0,
):
axis = self._get_axis_number(axis)
return EWM(
self,
com=com,
span=span,
halflife=halflife,
alpha=alpha,
min_periods=min_periods,
adjust=adjust,
ignore_na=ignore_na,
axis=axis,
)
cls.ewm = ewm
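# Illustrative note: exactly one of com, span, halflife or alpha should be
# supplied; they are alternative parameterizations of the smoothing factor
# alpha = 1 / (1 + com) = 2 / (span + 1) = 1 - exp(-ln(2) / halflife).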
@Appender(_shared_docs["transform"] % dict(axis="", **_shared_doc_kwargs))
def transform(self, func, *args, **kwargs):
result = self.agg(func, *args, **kwargs)
if is_scalar(result) or len(result) != len(self):
raise ValueError("transforms cannot produce aggregated results")
return result
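# Illustrative sketch for transform above: an element-wise func preserves the
# shape, while an aggregating func such as 'sum' raises the ValueError.
# >>> pd.Series([1, 2, 3]).transform(lambda x: x + 1)
# 0    2
# 1    3
# 2    4
# dtype: int64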
# ----------------------------------------------------------------------
# Misc methods
_shared_docs[
"valid_index"
] = """
Return index for %(position)s non-NA/null value.
Returns
-------
scalar : type of index
Notes
-----
If all elements are non-NA/null, returns None.
Also returns None for empty %(klass)s.
"""
def _find_valid_index(self, how):
"""
Retrieves the index of the first valid value.
Parameters
----------
how : {'first', 'last'}
Use this parameter to change between the first or last valid index.
Returns
-------
idx_first_valid : type of index
"""
assert how in ["first", "last"]
if len(self) == 0: # early stop
return None
is_valid = ~self.isna()
if self.ndim == 2:
is_valid = is_valid.any(1) # reduce axis 1
if how == "first":
idxpos = is_valid.values[::].argmax()
if how == "last":
idxpos = len(self) - 1 - is_valid.values[::-1].argmax()
chk_notna = is_valid.iat[idxpos]
idx = self.index[idxpos]
if not chk_notna:
return None
return idx
@Appender(
_shared_docs["valid_index"] % {"position": "first", "klass": "Series/DataFrame"}
)
def first_valid_index(self):
return self._find_valid_index("first")
@Appender(
_shared_docs["valid_index"] % {"position": "last", "klass": "Series/DataFrame"}
)
def last_valid_index(self):
return self._find_valid_index("last")
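# Illustrative sketch for the two methods above:
# >>> s = pd.Series([np.nan, 3.0, np.nan])
# >>> s.first_valid_index(), s.last_valid_index()
# (1, 1)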
def _doc_parms(cls):
"""Return a tuple of the doc parms."""
axis_descr = "{%s}" % ", ".join(
"{0} ({1})".format(a, i) for i, a in enumerate(cls._AXIS_ORDERS)
)
name = cls._constructor_sliced.__name__ if cls._AXIS_LEN > 1 else "scalar"
name2 = cls.__name__
return axis_descr, name, name2
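# Illustrative note: for DataFrame this evaluates to roughly
# ("{index (0), columns (1)}", "Series", "DataFrame").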
_num_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
Axis for the function to be applied on.
skipna : bool, default True
Exclude NA/null values when computing the result.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
%(min_count)s\
**kwargs
Additional keyword arguments to be passed to the function.
Returns
-------
%(name1)s or %(name2)s (if level specified)\
%(see_also)s\
%(examples)s
"""
_num_ddof_doc = """
%(desc)s
Parameters
----------
axis : %(axis_descr)s
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s
ddof : int, default 1
Delta Degrees of Freedom. The divisor used in calculations is N - ddof,
where N represents the number of elements.
numeric_only : bool, default None
Include only float, int, boolean columns. If None, will attempt to use
everything, then use only numeric data. Not implemented for Series.
Returns
-------
%(name1)s or %(name2)s (if level specified)\n"""
_bool_doc = """
%(desc)s
Parameters
----------
axis : {0 or 'index', 1 or 'columns', None}, default 0
Indicate which axis or axes should be reduced.
* 0 / 'index' : reduce the index, return a Series whose index is the
original column labels.
* 1 / 'columns' : reduce the columns, return a Series whose index is the
original index.
* None : reduce all axes, return a scalar.
bool_only : bool, default None
Include only boolean columns. If None, will attempt to use everything,
then use only boolean data. Not implemented for Series.
skipna : bool, default True
Exclude NA/null values. If the entire row/column is NA and skipna is
True, then the result will be %(empty_value)s, as for an empty row/column.
If skipna is False, then NA are treated as True, because these are not
equal to zero.
level : int or level name, default None
If the axis is a MultiIndex (hierarchical), count along a
particular level, collapsing into a %(name1)s.
**kwargs : any, default None
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
If level is specified, then, %(name2)s is returned; otherwise, %(name1)s
is returned.
%(see_also)s
%(examples)s"""
_all_desc = """\
Return whether all elements are True, potentially over an axis.
Returns True unless there at least one element within a series or
along a Dataframe axis that is False or equivalent (e.g. zero or
empty)."""
_all_examples = """\
Examples
--------
**Series**
>>> pd.Series([True, True]).all()
True
>>> pd.Series([True, False]).all()
False
>>> pd.Series([]).all()
True
>>> pd.Series([np.nan]).all()
True
>>> pd.Series([np.nan]).all(skipna=False)
True
**DataFrames**
Create a dataframe from a dictionary.
>>> df = pd.DataFrame({'col1': [True, True], 'col2': [True, False]})
>>> df
col1 col2
0 True True
1 True False
Default behaviour checks if column-wise values all return True.
>>> df.all()
col1 True
col2 False
dtype: bool
Specify ``axis='columns'`` to check if row-wise values all return True.
>>> df.all(axis='columns')
0 True
1 False
dtype: bool
Or ``axis=None`` for whether every value is True.
>>> df.all(axis=None)
False
"""
_all_see_also = """\
See Also
--------
Series.all : Return True if all elements are True.
DataFrame.any : Return True if one (or more) elements are True.
"""
_cnum_doc = """
Return cumulative %(desc)s over a DataFrame or Series axis.
Returns a DataFrame or Series of the same size containing the cumulative
%(desc)s.
Parameters
----------
axis : {0 or 'index', 1 or 'columns'}, default 0
The index or the name of the axis. 0 is equivalent to None or 'index'.
skipna : bool, default True
Exclude NA/null values. If an entire row/column is NA, the result
will be NA.
*args, **kwargs :
Additional keywords have no effect but might be accepted for
compatibility with NumPy.
Returns
-------
%(name1)s or %(name2)s
See Also
--------
core.window.Expanding.%(accum_func_name)s : Similar functionality
but ignores ``NaN`` values.
%(name2)s.%(accum_func_name)s : Return the %(desc)s over
%(name2)s axis.
%(name2)s.cummax : Return cumulative maximum over %(name2)s axis.
%(name2)s.cummin : Return cumulative minimum over %(name2)s axis.
%(name2)s.cumsum : Return cumulative sum over %(name2)s axis.
%(name2)s.cumprod : Return cumulative product over %(name2)s axis.
%(examples)s"""
_cummin_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummin()
0 2.0
1 NaN
2 2.0
3 -1.0
4 -1.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummin(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the minimum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummin()
A B
0 2.0 1.0
1 2.0 NaN
2 1.0 0.0
To iterate over columns and find the minimum in each row,
use ``axis=1``
>>> df.cummin(axis=1)
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
"""
_cumsum_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumsum()
0 2.0
1 NaN
2 7.0
3 6.0
4 6.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumsum(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the sum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumsum()
A B
0 2.0 1.0
1 5.0 NaN
2 6.0 1.0
To iterate over columns and find the sum in each row,
use ``axis=1``
>>> df.cumsum(axis=1)
A B
0 2.0 3.0
1 3.0 NaN
2 1.0 1.0
"""
_cumprod_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cumprod()
0 2.0
1 NaN
2 10.0
3 -10.0
4 -0.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cumprod(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the product
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cumprod()
A B
0 2.0 1.0
1 6.0 NaN
2 6.0 0.0
To iterate over columns and find the product in each row,
use ``axis=1``
>>> df.cumprod(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 0.0
"""
_cummax_examples = """\
Examples
--------
**Series**
>>> s = pd.Series([2, np.nan, 5, -1, 0])
>>> s
0 2.0
1 NaN
2 5.0
3 -1.0
4 0.0
dtype: float64
By default, NA values are ignored.
>>> s.cummax()
0 2.0
1 NaN
2 5.0
3 5.0
4 5.0
dtype: float64
To include NA values in the operation, use ``skipna=False``
>>> s.cummax(skipna=False)
0 2.0
1 NaN
2 NaN
3 NaN
4 NaN
dtype: float64
**DataFrame**
>>> df = pd.DataFrame([[2.0, 1.0],
... [3.0, np.nan],
... [1.0, 0.0]],
... columns=list('AB'))
>>> df
A B
0 2.0 1.0
1 3.0 NaN
2 1.0 0.0
By default, iterates over rows and finds the maximum
in each column. This is equivalent to ``axis=None`` or ``axis='index'``.
>>> df.cummax()
A B
0 2.0 1.0
1 3.0 NaN
2 3.0 1.0
To iterate over columns and find the maximum in each row,
use ``axis=1``
>>> df.cummax(axis=1)
A B
0 2.0 2.0
1 3.0 NaN
2 1.0 1.0
"""
_any_see_also = """\
See Also
--------
numpy.any : Numpy version of this method.
Series.any : Return whether any element is True.
Series.all : Return whether all elements are True.
DataFrame.any : Return whether any element is True over requested axis.
DataFrame.all : Return whether all elements are True over requested axis.
"""
_any_desc = """\
Return whether any element is True, potentially over an axis.
Returns False unless there at least one element within a series or
along a Dataframe axis that is True or equivalent (e.g. non-zero or
non-empty)."""
_any_examples = """\
Examples
--------
**Series**
For Series input, the output is a scalar indicating whether any element
is True.
>>> pd.Series([False, False]).any()
False
>>> pd.Series([True, False]).any()
True
>>> pd.Series([]).any()
False
>>> pd.Series([np.nan]).any()
False
>>> pd.Series([np.nan]).any(skipna=False)
True
**DataFrame**
Whether each column contains at least one True element (the default).
>>> df = pd.DataFrame({"A": [1, 2], "B": [0, 2], "C": [0, 0]})
>>> df
A B C
0 1 0 0
1 2 2 0
>>> df.any()
A True
B True
C False
dtype: bool
Aggregating over the columns.
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 2]})
>>> df
A B
0 True 1
1 False 2
>>> df.any(axis='columns')
0 True
1 True
dtype: bool
>>> df = pd.DataFrame({"A": [True, False], "B": [1, 0]})
>>> df
A B
0 True 1
1 False 0
>>> df.any(axis='columns')
0 True
1 False
dtype: bool
Aggregating over the entire DataFrame with ``axis=None``.
>>> df.any(axis=None)
True
`any` for an empty DataFrame is an empty Series.
>>> pd.DataFrame([]).any()
Series([], dtype: bool)
"""
_shared_docs[
"stat_func_example"
] = """
Examples
--------
>>> idx = pd.MultiIndex.from_arrays([
... ['warm', 'warm', 'cold', 'cold'],
... ['dog', 'falcon', 'fish', 'spider']],
... names=['blooded', 'animal'])
>>> s = pd.Series([4, 2, 0, 8], name='legs', index=idx)
>>> s
blooded animal
warm dog 4
falcon 2
cold fish 0
spider 8
Name: legs, dtype: int64
>>> s.{stat_func}()
{default_output}
{verb} using level names, as well as indices.
>>> s.{stat_func}(level='blooded')
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64
>>> s.{stat_func}(level=0)
blooded
warm {level_output_0}
cold {level_output_1}
Name: legs, dtype: int64"""
_sum_examples = _shared_docs["stat_func_example"].format(
stat_func="sum", verb="Sum", default_output=14, level_output_0=6, level_output_1=8
)
_sum_examples += """
By default, the sum of an empty or all-NA Series is ``0``.
>>> pd.Series([]).sum() # min_count=0 is the default
0.0
This can be controlled with the ``min_count`` parameter. For example, if
you'd like the sum of an empty series to be NaN, pass ``min_count=1``.
>>> pd.Series([]).sum(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).sum()
0.0
>>> pd.Series([np.nan]).sum(min_count=1)
nan"""
_max_examples = _shared_docs["stat_func_example"].format(
stat_func="max", verb="Max", default_output=8, level_output_0=4, level_output_1=8
)
_min_examples = _shared_docs["stat_func_example"].format(
stat_func="min", verb="Min", default_output=0, level_output_0=2, level_output_1=0
)
_stat_func_see_also = """
See Also
--------
Series.sum : Return the sum.
Series.min : Return the minimum.
Series.max : Return the maximum.
Series.idxmin : Return the index of the minimum.
Series.idxmax : Return the index of the maximum.
DataFrame.sum : Return the sum over the requested axis.
DataFrame.min : Return the minimum over the requested axis.
DataFrame.max : Return the maximum over the requested axis.
DataFrame.idxmin : Return the index of the minimum over the requested axis.
DataFrame.idxmax : Return the index of the maximum over the requested axis."""
_prod_examples = """
Examples
--------
By default, the product of an empty or all-NA Series is ``1``
>>> pd.Series([]).prod()
1.0
This can be controlled with the ``min_count`` parameter
>>> pd.Series([]).prod(min_count=1)
nan
Thanks to the ``skipna`` parameter, ``min_count`` handles all-NA and
empty series identically.
>>> pd.Series([np.nan]).prod()
1.0
>>> pd.Series([np.nan]).prod(min_count=1)
nan"""
_min_count_stub = """\
min_count : int, default 0
The required number of valid values to perform the operation. If fewer than
``min_count`` non-NA values are present the result will be NA.
.. versionadded:: 0.22.0
Added with the default being 0. This means the sum of an all-NA
or empty Series is 0, and the product of an all-NA or empty
Series is 1.
"""
def _make_min_count_stat_function(
cls, name, name1, name2, axis_descr, desc, f, see_also="", examples=""
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count=_min_count_stub,
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self,
axis=None,
skipna=None,
level=None,
numeric_only=None,
min_count=0,
**kwargs
):
if name == "sum":
nv.validate_sum(tuple(), kwargs)
elif name == "prod":
nv.validate_prod(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, min_count=min_count
)
return self._reduce(
f,
name,
axis=axis,
skipna=skipna,
numeric_only=numeric_only,
min_count=min_count,
)
return set_function_name(stat_func, name, cls)
def _make_stat_function(
cls, name, name1, name2, axis_descr, desc, f, see_also="", examples=""
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
min_count="",
see_also=see_also,
examples=examples,
)
@Appender(_num_doc)
def stat_func(
self, axis=None, skipna=None, level=None, numeric_only=None, **kwargs
):
if name == "median":
nv.validate_median(tuple(), kwargs)
else:
nv.validate_stat_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
f, name, axis=axis, skipna=skipna, numeric_only=numeric_only
)
return set_function_name(stat_func, name, cls)
def _make_stat_function_ddof(cls, name, name1, name2, axis_descr, desc, f):
@Substitution(desc=desc, name1=name1, name2=name2, axis_descr=axis_descr)
@Appender(_num_ddof_doc)
def stat_func(
self, axis=None, skipna=None, level=None, ddof=1, numeric_only=None, **kwargs
):
nv.validate_stat_ddof_func(tuple(), kwargs, fname=name)
if skipna is None:
skipna = True
if axis is None:
axis = self._stat_axis_number
if level is not None:
return self._agg_by_level(
name, axis=axis, level=level, skipna=skipna, ddof=ddof
)
return self._reduce(
f, name, axis=axis, numeric_only=numeric_only, skipna=skipna, ddof=ddof
)
return set_function_name(stat_func, name, cls)
def _make_cum_function(
cls,
name,
name1,
name2,
axis_descr,
desc,
accum_func,
accum_func_name,
mask_a,
mask_b,
examples,
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
accum_func_name=accum_func_name,
examples=examples,
)
@Appender(_cnum_doc)
def cum_func(self, axis=None, skipna=True, *args, **kwargs):
skipna = nv.validate_cum_func_with_skipna(skipna, args, kwargs, name)
if axis is None:
axis = self._stat_axis_number
else:
axis = self._get_axis_number(axis)
y = com.values_from_object(self).copy()
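# Note (illustrative): mask_a is the identity element substituted for NaN
# before accumulating (e.g. 0.0 for cumsum, np.inf for cummin) and mask_b
# (NaN) is written back over those positions afterwards so missing values
# stay missing in the result.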
if skipna and issubclass(y.dtype.type, (np.datetime64, np.timedelta64)):
result = accum_func(y, axis)
mask = isna(self)
np.putmask(result, mask, iNaT)
elif skipna and not issubclass(y.dtype.type, (np.integer, np.bool_)):
mask = isna(self)
np.putmask(y, mask, mask_a)
result = accum_func(y, axis)
np.putmask(result, mask, mask_b)
else:
result = accum_func(y, axis)
d = self._construct_axes_dict()
d["copy"] = False
return self._constructor(result, **d).__finalize__(self)
return set_function_name(cum_func, name, cls)
def _make_logical_function(
cls, name, name1, name2, axis_descr, desc, f, see_also, examples, empty_value
):
@Substitution(
desc=desc,
name1=name1,
name2=name2,
axis_descr=axis_descr,
see_also=see_also,
examples=examples,
empty_value=empty_value,
)
@Appender(_bool_doc)
def logical_func(self, axis=0, bool_only=None, skipna=True, level=None, **kwargs):
nv.validate_logical_func(tuple(), kwargs, fname=name)
if level is not None:
if bool_only is not None:
raise NotImplementedError(
"Option bool_only is not implemented with option level."
)
return self._agg_by_level(name, axis=axis, level=level, skipna=skipna)
return self._reduce(
f,
name,
axis=axis,
skipna=skipna,
numeric_only=bool_only,
filter_type="bool",
)
return set_function_name(logical_func, name, cls)
# install the indexes
for _name, _indexer in indexing.get_indexers_list():
NDFrame._create_indexer(_name, _indexer)
|
py
|
1a5b36d33604e858d02fabdd1ec760775ac15c8b
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
#
# ==================
# VIZ MARKDOWN - multiple file, markdown format
# ==================
import os, os.path, sys
import json
from ..utils import *
from ..builder import * # loads and sets up Django
from ..viz_factory import VizFactory
class SPDXViz(VizFactory):
"""
A simple markdown rendering in multi pages
"""
def __init__(self, ontospy_graph, title=""):
"""
Init
"""
super(SPDXViz, self).__init__(ontospy_graph, title)
def _buildTemplates(self):
"""
OVERRIDING THIS METHOD from Factory
"""
# Ontology - MAIN PAGE
contents = self._renderTemplate(
"spdx/markdown_ontoinfo.md", extraContext=None)
FILE_NAME = "index.md"
main_url = self._save2File(contents, FILE_NAME, self.output_path)
browser_output_path = self.output_path
if self.ontospy_graph.all_classes:
# BROWSER PAGES - CLASSES ======
for entity in self.ontospy_graph.all_classes:
desc = ""
if os.path.isfile(entity.slug + "_desc.md"):
file = open(entity.slug + "_desc.md")
desc = file.read()
file.close()
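# Note (illustrative): the optional "<entity.slug>_desc.md" description file
# is looked up with a relative path, i.e. in the current working directory.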
extra_context = {
"main_entity": entity,
"main_entity_type": "class",
"ontograph": self.ontospy_graph,
"external_description": desc
}
contents = self._renderTemplate(
"spdx/markdown_classinfo.md",
extraContext=extra_context)
FILE_NAME = entity.slug + ".md"
self._save2File(contents, FILE_NAME, browser_output_path)
if self.ontospy_graph.all_properties:
# BROWSER PAGES - PROPERTIES ======
for entity in self.ontospy_graph.all_properties:
extra_context = {
"main_entity": entity,
"main_entity_type": "property",
"ontograph": self.ontospy_graph
}
contents = self._renderTemplate(
"spdx/markdown_propinfo.md",
extraContext=extra_context)
FILE_NAME = entity.slug + ".md"
self._save2File(contents, FILE_NAME, browser_output_path)
if self.ontospy_graph.all_skos_concepts:
# BROWSER PAGES - CONCEPTS ======
for entity in self.ontospy_graph.all_skos_concepts:
extra_context = {
"main_entity": entity,
"main_entity_type": "concept",
"ontograph": self.ontospy_graph
}
contents = self._renderTemplate(
"spdx/markdown_conceptinfo.md",
extraContext=extra_context)
FILE_NAME = entity.slug + ".md"
self._save2File(contents, FILE_NAME, browser_output_path)
return main_url
# if called directly, for testing purposes pick a random ontology
if __name__ == '__main__':
TEST_ONLINE = False
try:
g = get_onto_for_testing(TEST_ONLINE)
v = SPDXViz(g, title="")
v.build()
v.preview()
sys.exit(0)
except KeyboardInterrupt as e: # Ctrl-C
raise e
|