repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
---|---|---|---|---|---|---|
SyNet | SyNet-master/tensorpack/tensorpack/models/shapes.py | # -*- coding: utf-8 -*-
# File: shapes.py
import tensorflow as tf
from .common import layer_register
__all__ = ['ConcatWith']
@layer_register(use_scope=None)
def ConcatWith(x, tensor, dim):
"""
A wrapper around ``tf.concat`` to cooperate with :class:`LinearWrap`.
Args:
x (tf.Tensor): input
tensor (list[tf.Tensor]): a tensor or list of tensors to concatenate with x.
x will be at the beginning
dim (int): the dimension along which to concatenate
Returns:
tf.Tensor: ``tf.concat([x] + tensor, dim)``
"""
if type(tensor) != list:
tensor = [tensor]
return tf.concat([x] + tensor, dim)
| 671 | 22.172414 | 84 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/common.py | # -*- coding: utf-8 -*-
# File: common.py
from .registry import layer_register, disable_layer_logging # noqa
from .tflayer import rename_tflayer_get_variable
from .utils import VariableHolder # noqa
__all__ = ['layer_register', 'VariableHolder', 'rename_tflayer_get_variable',
'disable_layer_logging']
| 317 | 30.8 | 77 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .batch_norm import *
from .common import *
from .conv2d import *
from .fc import *
from .layer_norm import *
from .linearwrap import *
from .nonlin import *
from .pool import *
from .regularize import *
from pkgutil import iter_modules
import os
import os.path
# this line is necessary for _TFModuleFunc to work
import tensorflow as tf # noqa: F401
__all__ = []
def _global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else dir(p)
del globals()[name]
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
_SKIP = ['utils', 'registry', 'tflayer']
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if module_name.startswith('_'):
continue
if "_test" in module_name:
continue
if module_name not in _SKIP:
_global_import(module_name)
| 1,355 | 25.588235 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/layer_norm.py | # -*- coding: utf-8 -*-
# File: layer_norm.py
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..utils.argtools import get_data_format
from ..utils.develop import log_deprecated
from .common import VariableHolder, layer_register
from .tflayer import convert_to_tflayer_args
__all__ = ['LayerNorm', 'InstanceNorm']
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'use_bias': 'center',
'use_scale': 'scale',
'gamma_init': 'gamma_initializer',
})
def LayerNorm(
x, epsilon=1e-5, *,
center=True, scale=True,
gamma_initializer=tf.ones_initializer(),
data_format='channels_last'):
"""
Layer Normalization layer, as described in the paper:
`Layer Normalization <https://arxiv.org/abs/1607.06450>`_.
Args:
x (tf.Tensor): a 4D or 2D tensor. When 4D, the layout should match data_format.
epsilon (float): epsilon to avoid divide-by-zero.
center, scale (bool): whether to use the extra affine transformation or not.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = x.get_shape().as_list()
ndims = len(shape)
assert ndims in [2, 4]
mean, var = tf.nn.moments(x, list(range(1, len(shape))), keep_dims=True)
if data_format == 'NCHW':
chan = shape[1]
new_shape = [1, chan, 1, 1]
else:
chan = shape[-1]
new_shape = [1, 1, 1, chan]
if ndims == 2:
new_shape = [1, chan]
if center:
beta = tf.get_variable('beta', [chan], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
else:
beta = tf.zeros([1] * ndims, name='beta')
if scale:
gamma = tf.get_variable('gamma', [chan], initializer=gamma_initializer)
gamma = tf.reshape(gamma, new_shape)
else:
gamma = tf.ones([1] * ndims, name='gamma')
ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
vh = ret.variables = VariableHolder()
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
@layer_register()
@convert_to_tflayer_args(
args_names=[],
name_mapping={
'gamma_init': 'gamma_initializer',
})
def InstanceNorm(x, epsilon=1e-5, *, center=True, scale=True,
gamma_initializer=tf.ones_initializer(),
data_format='channels_last', use_affine=None):
"""
Instance Normalization, as in the paper:
`Instance Normalization: The Missing Ingredient for Fast Stylization
<https://arxiv.org/abs/1607.08022>`_.
Args:
x (tf.Tensor): a 4D tensor.
epsilon (float): avoid divide-by-zero
center, scale (bool): whether to use the extra affine transformation or not.
use_affine: deprecated. Don't use.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = x.get_shape().as_list()
assert len(shape) == 4, "Input of InstanceNorm has to be 4D!"
if use_affine is not None:
log_deprecated("InstanceNorm(use_affine=)", "Use center= or scale= instead!", "2020-06-01")
center = scale = use_affine
if data_format == 'NHWC':
axis = [1, 2]
ch = shape[3]
new_shape = [1, 1, 1, ch]
else:
axis = [2, 3]
ch = shape[1]
new_shape = [1, ch, 1, 1]
    assert ch is not None, "Input of InstanceNorm requires a known channel!"
mean, var = tf.nn.moments(x, axis, keep_dims=True)
if center:
beta = tf.get_variable('beta', [ch], initializer=tf.constant_initializer())
beta = tf.reshape(beta, new_shape)
else:
beta = tf.zeros([1, 1, 1, 1], name='beta', dtype=x.dtype)
if scale:
gamma = tf.get_variable('gamma', [ch], initializer=gamma_initializer)
gamma = tf.reshape(gamma, new_shape)
else:
gamma = tf.ones([1, 1, 1, 1], name='gamma', dtype=x.dtype)
ret = tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon, name='output')
vh = ret.variables = VariableHolder()
if scale:
vh.gamma = gamma
if center:
vh.beta = beta
return ret
| 4,188 | 30.734848 | 99 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/nonlin.py | # -*- coding: utf-8 -*-
# File: nonlin.py
import tensorflow as tf
from ..utils.develop import log_deprecated
from ..compat import tfv1
from .batch_norm import BatchNorm
from .common import VariableHolder, layer_register
from .utils import disable_autograph
__all__ = ['Maxout', 'PReLU', 'BNReLU']
@layer_register(use_scope=None)
def Maxout(x, num_unit):
"""
Maxout as in the paper `Maxout Networks <http://arxiv.org/abs/1302.4389>`_.
Args:
        x (tf.Tensor): an NHWC or NC tensor. Channel has to be known.
        num_unit (int): an int. C must be divisible by num_unit.
Returns:
tf.Tensor: of shape NHW(C/num_unit) named ``output``.
"""
input_shape = x.get_shape().as_list()
ndim = len(input_shape)
assert ndim == 4 or ndim == 2
ch = input_shape[-1]
assert ch is not None and ch % num_unit == 0
if ndim == 4:
        x = tf.reshape(x, [-1, input_shape[1], input_shape[2], ch // num_unit, num_unit])
else:
        x = tf.reshape(x, [-1, ch // num_unit, num_unit])
return tf.reduce_max(x, ndim, name='output')
@layer_register()
@disable_autograph()
def PReLU(x, init=0.001, name=None):
"""
Parameterized ReLU as in the paper `Delving Deep into Rectifiers: Surpassing
Human-Level Performance on ImageNet Classification
<http://arxiv.org/abs/1502.01852>`_.
Args:
x (tf.Tensor): input
init (float): initial value for the learnable slope.
name (str): deprecated argument. Don't use
Variable Names:
* ``alpha``: learnable slope.
"""
if name is not None:
log_deprecated("PReLU(name=...)", "The output tensor will be named `output`.")
init = tfv1.constant_initializer(init)
alpha = tfv1.get_variable('alpha', [], initializer=init)
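    # 0.5 * ((1 + alpha) * x + (1 - alpha) * |x|) evaluates to x for x >= 0
    # and to alpha * x for x < 0, i.e. the PReLU activation.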
x = ((1 + alpha) * x + (1 - alpha) * tf.abs(x))
ret = tf.multiply(x, 0.5, name=name or None)
ret.variables = VariableHolder(alpha=alpha)
return ret
@layer_register(use_scope=None)
def BNReLU(x, name=None):
"""
A shorthand of BatchNormalization + ReLU.
Args:
x (tf.Tensor): the input
name: deprecated, don't use.
"""
if name is not None:
log_deprecated("BNReLU(name=...)", "The output tensor will be named `output`.")
x = BatchNorm('bn', x)
x = tf.nn.relu(x, name=name)
return x
| 2,318 | 26.939759 | 88 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/pool.py | # -*- coding: utf-8 -*-
# File: pool.py
import numpy as np
from ..compat import tfv1 as tf # this should be avoided first in model code
from ..utils.argtools import get_data_format, shape2d
from .common import layer_register
from .shape_utils import StaticDynamicShape
from .tflayer import convert_to_tflayer_args
__all__ = ['MaxPooling', 'FixedUnPooling', 'AvgPooling', 'GlobalAvgPooling']
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['pool_size', 'strides'],
name_mapping={'shape': 'pool_size', 'stride': 'strides'})
def MaxPooling(
inputs,
pool_size,
strides=None,
padding='valid',
data_format='channels_last'):
"""
Same as `tf.layers.MaxPooling2D`. Default strides is equal to pool_size.
"""
if strides is None:
strides = pool_size
layer = tf.layers.MaxPooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name='output')
@layer_register(log_shape=True)
@convert_to_tflayer_args(
args_names=['pool_size', 'strides'],
name_mapping={'shape': 'pool_size', 'stride': 'strides'})
def AvgPooling(
inputs,
pool_size,
strides=None,
padding='valid',
data_format='channels_last'):
"""
Same as `tf.layers.AveragePooling2D`. Default strides is equal to pool_size.
"""
if strides is None:
strides = pool_size
layer = tf.layers.AveragePooling2D(pool_size, strides, padding=padding, data_format=data_format)
ret = layer.apply(inputs, scope=tf.get_variable_scope())
return tf.identity(ret, name='output')
@layer_register(log_shape=True)
def GlobalAvgPooling(x, data_format='channels_last'):
"""
Global average pooling as in the paper `Network In Network
<http://arxiv.org/abs/1312.4400>`_.
Args:
x (tf.Tensor): a 4D tensor.
Returns:
tf.Tensor: a NC tensor named ``output``.
"""
assert x.shape.ndims == 4
data_format = get_data_format(data_format)
axis = [1, 2] if data_format == 'channels_last' else [2, 3]
return tf.reduce_mean(x, axis, name='output')
def UnPooling2x2ZeroFilled(x):
# https://github.com/tensorflow/tensorflow/issues/2169
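    # Zero-filled 2x upsampling: concatenating zeros along the channel and width
    # axes, then reshaping, places each input pixel at the top-left of a 2x2 zero block.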
out = tf.concat([x, tf.zeros_like(x)], 3)
out = tf.concat([out, tf.zeros_like(out)], 2)
sh = x.get_shape().as_list()
if None not in sh[1:]:
out_size = [-1, sh[1] * 2, sh[2] * 2, sh[3]]
return tf.reshape(out, out_size)
else:
shv = tf.shape(x)
ret = tf.reshape(out, tf.stack([-1, shv[1] * 2, shv[2] * 2, sh[3]]))
return ret
@layer_register(log_shape=True)
def FixedUnPooling(x, shape, unpool_mat=None, data_format='channels_last'):
"""
Unpool the input with a fixed matrix to perform kronecker product with.
Args:
x (tf.Tensor): a 4D image tensor
shape: int or (h, w) tuple
unpool_mat: a tf.Tensor or np.ndarray 2D matrix with size=shape.
            If None, will use a matrix with 1 at the top-left corner.
Returns:
tf.Tensor: a 4D image tensor.
"""
data_format = get_data_format(data_format, keras_mode=False)
shape = shape2d(shape)
output_shape = StaticDynamicShape(x)
output_shape.apply(1 if data_format == 'NHWC' else 2, lambda x: x * shape[0])
output_shape.apply(2 if data_format == 'NHWC' else 3, lambda x: x * shape[1])
# a faster implementation for this special case
if shape[0] == 2 and shape[1] == 2 and unpool_mat is None and data_format == 'NHWC':
ret = UnPooling2x2ZeroFilled(x)
else:
# check unpool_mat
if unpool_mat is None:
mat = np.zeros(shape, dtype='float32')
mat[0][0] = 1
unpool_mat = tf.constant(mat, name='unpool_mat')
elif isinstance(unpool_mat, np.ndarray):
unpool_mat = tf.constant(unpool_mat, name='unpool_mat')
assert unpool_mat.shape.as_list() == list(shape)
if data_format == 'NHWC':
x = tf.transpose(x, [0, 3, 1, 2])
# perform a tensor-matrix kronecker product
x = tf.expand_dims(x, -1) # bchwx1
mat = tf.expand_dims(unpool_mat, 0) # 1xshxsw
ret = tf.tensordot(x, mat, axes=1) # bxcxhxwxshxsw
if data_format == 'NHWC':
ret = tf.transpose(ret, [0, 2, 4, 3, 5, 1])
else:
ret = tf.transpose(ret, [0, 1, 2, 4, 3, 5])
shape3_dyn = [output_shape.get_dynamic(k) for k in range(1, 4)]
ret = tf.reshape(ret, tf.stack([-1] + shape3_dyn))
ret.set_shape(tf.TensorShape(output_shape.get_static()))
return ret
| 4,686 | 32.719424 | 100 | py |
SyNet | SyNet-master/tensorpack/tensorpack/models/regularize.py | # -*- coding: utf-8 -*-
# File: regularize.py
import re
import tensorflow as tf
from ..compat import tfv1
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.tower import get_current_tower_context
from ..utils import logger
from ..utils.argtools import graph_memoized
from .common import layer_register
__all__ = ['regularize_cost', 'regularize_cost_from_collection',
'l2_regularizer', 'l1_regularizer', 'Dropout']
@graph_memoized
def _log_once(msg):
logger.info(msg)
if get_tf_version_tuple() <= (1, 12):
l2_regularizer = tf.contrib.layers.l2_regularizer # deprecated
l1_regularizer = tf.contrib.layers.l1_regularizer # deprecated
else:
# oh these little dirty details
l2_regularizer = lambda x: tf.keras.regularizers.l2(x * 0.5) # noqa
l1_regularizer = tf.keras.regularizers.l1
def regularize_cost(regex, func, name='regularize_cost'):
"""
Apply a regularizer on trainable variables matching the regex, and print
the matched variables (only print once in multi-tower training).
In replicated mode, it will only regularize variables within the current tower.
If called under a TowerContext with `is_training==False`, this function returns a zero constant tensor.
Args:
regex (str): a regex to match variable names, e.g. "conv.*/W"
func: the regularization function, which takes a tensor and returns a scalar tensor.
E.g., ``tf.nn.l2_loss, tf.contrib.layers.l1_regularizer(0.001)``.
Returns:
tf.Tensor: a scalar, the total regularization cost.
Example:
.. code-block:: python
cost = cost + regularize_cost("fc.*/W", l2_regularizer(1e-5))
"""
assert len(regex)
ctx = get_current_tower_context()
if not ctx.is_training:
# Currently cannot build the wd_cost correctly at inference,
        # because the vs_name used in inference can be '', therefore the
# variable filter will fail
return tf.constant(0, dtype=tf.float32, name='empty_' + name)
# If vars are shared, regularize all of them
# If vars are replicated, only regularize those in the current tower
if ctx.has_own_variables:
params = ctx.get_collection_in_tower(tfv1.GraphKeys.TRAINABLE_VARIABLES)
else:
params = tfv1.trainable_variables()
names = []
with tfv1.name_scope(name + '_internals'):
costs = []
for p in params:
para_name = p.op.name
if re.search(regex, para_name):
regloss = func(p)
assert regloss.dtype.is_floating, regloss
# Some variables may not be fp32, but it should
# be fine to assume regularization in fp32
if regloss.dtype != tf.float32:
regloss = tf.cast(regloss, tf.float32)
costs.append(regloss)
names.append(p.name)
if not costs:
return tf.constant(0, dtype=tf.float32, name='empty_' + name)
# remove tower prefix from names, and print
if len(ctx.vs_name):
prefix = ctx.vs_name + '/'
prefixlen = len(prefix)
def f(name):
if name.startswith(prefix):
return name[prefixlen:]
return name
names = list(map(f, names))
logger.info("regularize_cost() found {} variables to regularize.".format(len(names)))
_log_once("The following tensors will be regularized: {}".format(', '.join(names)))
return tf.add_n(costs, name=name)
def regularize_cost_from_collection(name='regularize_cost'):
"""
Get the cost from the regularizers in ``tf.GraphKeys.REGULARIZATION_LOSSES``.
If in replicated mode, will only regularize variables created within the current tower.
Args:
name (str): the name of the returned tensor
Returns:
tf.Tensor: a scalar, the total regularization cost.
"""
ctx = get_current_tower_context()
if not ctx.is_training:
# TODO Currently cannot build the wd_cost correctly at inference,
        # because the vs_name used in inference can be '', therefore the
# variable filter will fail
return tf.constant(0, dtype=tf.float32, name='empty_' + name)
# NOTE: this collection doesn't always grow with towers.
# It only grows with actual variable creation, but not get_variable call.
if ctx.has_own_variables: # be careful of the first tower (name='')
losses = ctx.get_collection_in_tower(tfv1.GraphKeys.REGULARIZATION_LOSSES)
else:
losses = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES)
if len(losses) > 0:
logger.info("regularize_cost_from_collection() found {} regularizers "
"in REGULARIZATION_LOSSES collection.".format(len(losses)))
def maploss(l):
assert l.dtype.is_floating, l
if l.dtype != tf.float32:
l = tf.cast(l, tf.float32)
return l
losses = [maploss(l) for l in losses]
reg_loss = tf.add_n(losses, name=name)
return reg_loss
else:
return tf.constant(0, dtype=tf.float32, name='empty_' + name)
@layer_register(use_scope=None)
def Dropout(x, *args, **kwargs):
"""
Same as `tf.layers.dropout`.
However, for historical reasons, the first positional argument is
interpreted as keep_prob rather than drop_prob.
Explicitly use `rate=` keyword arguments to ensure things are consistent.
"""
if 'is_training' in kwargs:
kwargs['training'] = kwargs.pop('is_training')
if len(args) > 0:
if args[0] != 0.5:
logger.warn(
"The first positional argument to tensorpack.Dropout is the probability to keep, rather than to drop. "
"This is different from the rate argument in tf.layers.Dropout due to historical reasons. "
"To mimic tf.layers.Dropout, explicitly use keyword argument 'rate' instead")
rate = 1 - args[0]
elif 'keep_prob' in kwargs:
assert 'rate' not in kwargs, "Cannot set both keep_prob and rate!"
rate = 1 - kwargs.pop('keep_prob')
elif 'rate' in kwargs:
rate = kwargs.pop('rate')
else:
rate = 0.5
if kwargs.get('training', None) is None:
kwargs['training'] = get_current_tower_context().is_training
if get_tf_version_tuple() <= (1, 12):
return tf.layers.dropout(x, rate=rate, **kwargs)
else:
return tf.nn.dropout(x, rate=rate if kwargs['training'] else 0.)
| 6,528 | 36.096591 | 119 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/base.py | # -*- coding: utf-8 -*-
# File: base.py
import threading
from abc import ABCMeta, abstractmethod
import six
from ..utils.utils import get_rng
__all__ = ['DataFlow', 'ProxyDataFlow', 'RNGDataFlow', 'DataFlowTerminated']
class DataFlowTerminated(BaseException):
"""
An exception indicating that the DataFlow is unable to produce any more
data, i.e. something wrong happened so that calling :meth:`get_data`
cannot give a valid iterator any more.
In most DataFlow this will never be raised.
"""
pass
class DataFlowReentrantGuard(object):
"""
A tool to enforce non-reentrancy.
Mostly used on DataFlow whose :meth:`get_data` is stateful,
so that multiple instances of the iterator cannot co-exist.
"""
def __init__(self):
self._lock = threading.Lock()
def __enter__(self):
self._succ = self._lock.acquire(False)
if not self._succ:
raise threading.ThreadError("This DataFlow is not reentrant!")
def __exit__(self, exc_type, exc_val, exc_tb):
self._lock.release()
return False
class DataFlowMeta(ABCMeta):
"""
DataFlow uses "__iter__()" and "__len__()" instead of
"get_data()" and "size()". This add back-compatibility.
"""
def __new__(mcls, name, bases, namespace, **kwargs):
def hot_patch(required, existing):
if required not in namespace and existing in namespace:
namespace[required] = namespace[existing]
hot_patch('__iter__', 'get_data')
hot_patch('__len__', 'size')
return ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)
@six.add_metaclass(DataFlowMeta)
class DataFlow(object):
""" Base class for all DataFlow """
@abstractmethod
def __iter__(self):
"""
* A dataflow is an iterable. The :meth:`__iter__` method should yield a list or dict each time.
Note that dict is **partially** supported at the moment: certain dataflow does not support dict.
* The :meth:`__iter__` method can be either finite (will stop iteration) or infinite
(will not stop iteration). For a finite dataflow, :meth:`__iter__` can be called
again immediately after the previous call returned.
        * For many dataflow, the :meth:`__iter__` method is non-reentrant, which means for a dataflow
instance ``df``, :meth:`df.__iter__` cannot be called before the previous
:meth:`df.__iter__` call has finished (iteration has stopped).
When a dataflow is non-reentrant, :meth:`df.__iter__` should throw an exception if
called before the previous call has finished.
For such non-reentrant dataflows, if you need to use the same dataflow in two places,
you need to create two dataflow instances.
Yields:
list/dict: The datapoint, i.e. list/dict of components.
"""
def __len__(self):
"""
* A dataflow can optionally implement :meth:`__len__`. If not implemented, it will
throw :class:`NotImplementedError`.
* It returns an integer representing the size of the dataflow.
The return value **may not be accurate or meaningful** at all.
When saying the length is "accurate", it means that
:meth:`__iter__` will always yield this many of datapoints before it stops iteration.
* There could be many reasons why :meth:`__len__` is inaccurate.
For example, some dataflow has dynamic size, if it throws away datapoints on the fly.
Some dataflow mixes the datapoints between consecutive passes over
the dataset, due to parallelism and buffering.
In this case it does not make sense to stop the iteration anywhere.
* Due to the above reasons, the length is only a rough guidance.
And it's up to the user how to interpret it.
Inside tensorpack it's only used in these places:
+ A default ``steps_per_epoch`` in training, but you probably want to customize
it yourself, especially when using data-parallel trainer.
+ The length of progress bar when processing a dataflow.
+ Used by :class:`InferenceRunner` to get the number of iterations in inference.
In this case users are **responsible** for making sure that :meth:`__len__` is "accurate".
This is to guarantee that inference is run on a fixed set of images.
Returns:
int: rough size of this dataflow.
Raises:
:class:`NotImplementedError` if this DataFlow doesn't have a size.
"""
raise NotImplementedError()
def reset_state(self):
"""
* The caller must guarantee that :meth:`reset_state` should be called **once and only once**
by the **process that uses the dataflow** before :meth:`__iter__` is called.
The caller thread of this method should stay alive to keep this dataflow alive.
* It is meant for certain initialization that involves processes,
e.g., initialize random number generators (RNG), create worker processes.
Because it's very common to use RNG in data processing,
developers of dataflow can also subclass :class:`RNGDataFlow` to have easier access to
a properly-initialized RNG.
* A dataflow is not fork-safe after :meth:`reset_state` is called (because this will violate the guarantee).
There are a few other dataflows that are not fork-safe anytime, which will be mentioned in the docs.
* You should take the responsibility and follow the above guarantee if you're the caller of a dataflow yourself
(either when you're using dataflow outside of tensorpack, or if you're writing a wrapper dataflow).
* Tensorpack's built-in forking dataflows (:class:`MultiProcessRunner`, :class:`MultiProcessMapData`, etc)
and other component that uses dataflows (:class:`InputSource`)
already take care of the responsibility of calling this method.
"""
pass
# These are the old (overly verbose) names for the methods:
def get_data(self):
return self.__iter__()
def size(self):
return self.__len__()
class RNGDataFlow(DataFlow):
""" A DataFlow with RNG"""
rng = None
"""
``self.rng`` is a ``np.random.RandomState`` instance that is initialized
correctly (with different seeds in each process) in ``RNGDataFlow.reset_state()``.
"""
def reset_state(self):
""" Reset the RNG """
self.rng = get_rng(self)
class ProxyDataFlow(DataFlow):
""" Base class for DataFlow that proxies another.
Every method is proxied to ``self.ds`` unless overriden by a subclass.
"""
def __init__(self, ds):
"""
Args:
ds (DataFlow): DataFlow to proxy.
"""
self.ds = ds
def reset_state(self):
self.ds.reset_state()
def __len__(self):
return self.ds.__len__()
def __iter__(self):
return self.ds.__iter__()
| 7,093 | 36.734043 | 119 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/image.py | # -*- coding: utf-8 -*-
# File: image.py
import copy as copy_mod
import numpy as np
from contextlib import contextmanager
from ..utils import logger
from ..utils.argtools import shape2d
from .base import RNGDataFlow
from .common import MapData, MapDataComponent
__all__ = ['ImageFromFile', 'AugmentImageComponent', 'AugmentImageCoordinates', 'AugmentImageComponents']
def check_dtype(img):
    assert isinstance(img, np.ndarray), "[Augmentor] Needs a numpy array, but got a {}!".format(type(img))
    assert not np.issubdtype(img.dtype, np.integer) or (img.dtype == np.uint8), \
        "[Augmentor] Got image of type {}, use uint8 or floating points instead!".format(img.dtype)
def validate_coords(coords):
assert coords.ndim == 2, coords.ndim
assert coords.shape[1] == 2, coords.shape
    assert np.issubdtype(coords.dtype, np.floating), coords.dtype
class ExceptionHandler:
def __init__(self, catch_exceptions=False):
self._nr_error = 0
self.catch_exceptions = catch_exceptions
@contextmanager
def catch(self):
try:
yield
except Exception:
self._nr_error += 1
if not self.catch_exceptions:
raise
else:
if self._nr_error % 100 == 0 or self._nr_error < 10:
logger.exception("Got {} augmentation errors.".format(self._nr_error))
class ImageFromFile(RNGDataFlow):
""" Produce images read from a list of files as (h, w, c) arrays. """
def __init__(self, files, channel=3, resize=None, shuffle=False):
"""
Args:
files (list): list of file paths.
channel (int): 1 or 3. Will convert grayscale to RGB images if channel==3.
Will produce (h, w, 1) array if channel==1.
resize (tuple): int or (h, w) tuple. If given, resize the image.
"""
assert len(files), "No image files given to ImageFromFile!"
self.files = files
self.channel = int(channel)
assert self.channel in [1, 3], self.channel
self.imread_mode = cv2.IMREAD_GRAYSCALE if self.channel == 1 else cv2.IMREAD_COLOR
if resize is not None:
resize = shape2d(resize)
self.resize = resize
self.shuffle = shuffle
def __len__(self):
return len(self.files)
def __iter__(self):
if self.shuffle:
self.rng.shuffle(self.files)
for f in self.files:
im = cv2.imread(f, self.imread_mode)
assert im is not None, f
if self.channel == 3:
im = im[:, :, ::-1]
if self.resize is not None:
im = cv2.resize(im, tuple(self.resize[::-1]))
if self.channel == 1:
im = im[:, :, np.newaxis]
yield [im]
class AugmentImageComponent(MapDataComponent):
"""
Apply image augmentors on 1 image component.
"""
def __init__(self, ds, augmentors, index=0, copy=True, catch_exceptions=False):
"""
Args:
ds (DataFlow): input DataFlow.
augmentors (AugmentorList): a list of :class:`imgaug.ImageAugmentor` to be applied in order.
index (int or str): the index or key of the image component to be augmented in the datapoint.
copy (bool): Some augmentors modify the input images. When copy is
True, a copy will be made before any augmentors are applied,
to keep the original images not modified.
Turn it off to save time when you know it's OK.
catch_exceptions (bool): when set to True, will catch
all exceptions and only warn you when there are too many (>100).
Can be used to ignore occasion errors in data.
"""
if isinstance(augmentors, AugmentorList):
self.augs = augmentors
else:
self.augs = AugmentorList(augmentors)
self._copy = copy
self._exception_handler = ExceptionHandler(catch_exceptions)
super(AugmentImageComponent, self).__init__(ds, self._aug_mapper, index)
def reset_state(self):
self.ds.reset_state()
self.augs.reset_state()
def _aug_mapper(self, x):
check_dtype(x)
with self._exception_handler.catch():
if self._copy:
x = copy_mod.deepcopy(x)
return self.augs.augment(x)
class AugmentImageCoordinates(MapData):
"""
Apply image augmentors on an image and a list of coordinates.
Coordinates must be a Nx2 floating point array, each row is (x, y).
"""
def __init__(self, ds, augmentors, img_index=0, coords_index=1, copy=True, catch_exceptions=False):
"""
Args:
ds (DataFlow): input DataFlow.
augmentors (AugmentorList): a list of :class:`imgaug.ImageAugmentor` to be applied in order.
img_index (int or str): the index/key of the image component to be augmented.
coords_index (int or str): the index/key of the coordinate component to be augmented.
copy, catch_exceptions: same as in :class:`AugmentImageComponent`
"""
if isinstance(augmentors, AugmentorList):
self.augs = augmentors
else:
self.augs = AugmentorList(augmentors)
self._img_index = img_index
self._coords_index = coords_index
self._copy = copy
self._exception_handler = ExceptionHandler(catch_exceptions)
super(AugmentImageCoordinates, self).__init__(ds, self._aug_mapper)
def reset_state(self):
self.ds.reset_state()
self.augs.reset_state()
def _aug_mapper(self, dp):
with self._exception_handler.catch():
img, coords = dp[self._img_index], dp[self._coords_index]
check_dtype(img)
validate_coords(coords)
if self._copy:
img, coords = copy_mod.deepcopy((img, coords))
tfms = self.augs.get_transform(img)
dp[self._img_index] = tfms.apply_image(img)
dp[self._coords_index] = tfms.apply_coords(coords)
return dp
class AugmentImageComponents(MapData):
"""
Apply image augmentors on several components, with shared augmentation parameters.
Example:
.. code-block:: python
ds = MyDataFlow() # produce [image(HWC), segmask(HW), keypoint(Nx2)]
ds = AugmentImageComponents(
ds, augs,
index=(0,1), coords_index=(2,))
"""
def __init__(self, ds, augmentors, index=(0, 1), coords_index=(), copy=True, catch_exceptions=False):
"""
Args:
ds (DataFlow): input DataFlow.
augmentors (AugmentorList): a list of :class:`imgaug.ImageAugmentor` instance to be applied in order.
index: tuple of indices of the image components.
coords_index: tuple of indices of the coordinates components.
copy, catch_exceptions: same as in :class:`AugmentImageComponent`
"""
if isinstance(augmentors, AugmentorList):
self.augs = augmentors
else:
self.augs = AugmentorList(augmentors)
self.ds = ds
self._exception_handler = ExceptionHandler(catch_exceptions)
self._copy = copy
self._index = index
self._coords_index = coords_index
super(AugmentImageComponents, self).__init__(ds, self._aug_mapper)
def reset_state(self):
self.ds.reset_state()
self.augs.reset_state()
def _aug_mapper(self, dp):
dp = copy_mod.copy(dp) # always do a shallow copy, make sure the list is intact
copy_func = copy_mod.deepcopy if self._copy else lambda x: x # noqa
with self._exception_handler.catch():
major_image = self._index[0] # image to be used to get params. TODO better design?
im = copy_func(dp[major_image])
check_dtype(im)
tfms = self.augs.get_transform(im)
dp[major_image] = tfms.apply_image(im)
for idx in self._index[1:]:
check_dtype(dp[idx])
dp[idx] = tfms.apply_image(copy_func(dp[idx]))
for idx in self._coords_index:
coords = copy_func(dp[idx])
validate_coords(coords)
dp[idx] = tfms.apply_coords(coords)
return dp
try:
import cv2
from .imgaug import AugmentorList
except ImportError:
from ..utils.develop import create_dummy_class
ImageFromFile = create_dummy_class('ImageFromFile', 'cv2') # noqa
AugmentImageComponent = create_dummy_class('AugmentImageComponent', 'cv2') # noqa
AugmentImageCoordinates = create_dummy_class('AugmentImageCoordinates', 'cv2') # noqa
AugmentImageComponents = create_dummy_class('AugmentImageComponents', 'cv2') # noqa
| 8,873 | 36.285714 | 113 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/format.py | # -*- coding: utf-8 -*-
# File: format.py
import numpy as np
import os
import six
from ..utils import logger
from ..utils.argtools import log_once
from ..utils.serialize import loads
from ..utils.develop import create_dummy_class # noqa
from ..utils.loadcaffe import get_caffe_pb
from ..utils.timer import timed_operation
from ..utils.utils import get_tqdm
from .base import DataFlowReentrantGuard, RNGDataFlow
from .common import MapData
__all__ = ['HDF5Data', 'LMDBData', 'LMDBDataDecoder',
'CaffeLMDB', 'SVMLightData']
"""
Adapters for different data formats.
"""
class HDF5Data(RNGDataFlow):
"""
Zip data from different paths in an HDF5 file.
Warning:
The current implementation will load all data into memory. (TODO)
"""
# TODO
def __init__(self, filename, data_paths, shuffle=True):
"""
Args:
filename (str): h5 data file.
            data_paths (list): list of h5 paths to be zipped.
For example `['images', 'labels']`.
shuffle (bool): shuffle all data.
"""
self.f = h5py.File(filename, 'r')
logger.info("Loading {} to memory...".format(filename))
self.dps = [self.f[k].value for k in data_paths]
lens = [len(k) for k in self.dps]
assert all(k == lens[0] for k in lens)
self._size = lens[0]
self.shuffle = shuffle
def __len__(self):
return self._size
def __iter__(self):
idxs = list(range(self._size))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
yield [dp[k] for dp in self.dps]
class LMDBData(RNGDataFlow):
"""
Read a LMDB database and produce (k,v) raw bytes pairs.
The raw bytes are usually not what you're interested in.
You might want to use
:class:`LMDBDataDecoder` or apply a
mapper function after :class:`LMDBData`.
"""
def __init__(self, lmdb_path, shuffle=True, keys=None):
"""
Args:
lmdb_path (str): a directory or a file.
shuffle (bool): shuffle the keys or not.
keys (list[str] or str): list of str as the keys, used only when shuffle is True.
It can also be a format string e.g. ``{:0>8d}`` which will be
formatted with the indices from 0 to *total_size - 1*.
If not given, it will then look in the database for ``__keys__`` which
:func:`LMDBSerializer.save` used to store the list of keys.
If still not found, it will iterate over the database to find
all the keys.
"""
self._lmdb_path = lmdb_path
self._shuffle = shuffle
self._open_lmdb()
self._size = self._txn.stat()['entries']
self._set_keys(keys)
logger.info("Found {} entries in {}".format(self._size, self._lmdb_path))
# Clean them up after finding the list of keys, since we don't want to fork them
self._close_lmdb()
def _set_keys(self, keys=None):
def find_keys(txn, size):
logger.warn("Traversing the database to find keys is slow. Your should specify the keys.")
keys = []
with timed_operation("Loading LMDB keys ...", log_start=True), \
get_tqdm(total=size) as pbar:
for k in self._txn.cursor():
assert k[0] != b'__keys__'
keys.append(k[0])
pbar.update()
return keys
self.keys = self._txn.get(b'__keys__')
if self.keys is not None:
self.keys = loads(self.keys)
self._size -= 1 # delete this item
if self._shuffle: # keys are necessary when shuffle is True
if keys is None:
if self.keys is None:
self.keys = find_keys(self._txn, self._size)
else:
# check if key-format like '{:0>8d}' was given
if isinstance(keys, six.string_types):
                    self.keys = list(map(lambda x: keys.format(x), np.arange(self._size)))
else:
self.keys = keys
def _open_lmdb(self):
self._lmdb = lmdb.open(self._lmdb_path,
subdir=os.path.isdir(self._lmdb_path),
readonly=True, lock=False, readahead=True,
map_size=1099511627776 * 2, max_readers=100)
self._txn = self._lmdb.begin()
def _close_lmdb(self):
self._lmdb.close()
del self._lmdb
del self._txn
def reset_state(self):
self._guard = DataFlowReentrantGuard()
super(LMDBData, self).reset_state()
self._open_lmdb() # open the LMDB in the worker process
def __len__(self):
return self._size
def __iter__(self):
with self._guard:
if not self._shuffle:
c = self._txn.cursor()
for k, v in c:
if k != b'__keys__':
yield [k, v]
else:
self.rng.shuffle(self.keys)
for k in self.keys:
v = self._txn.get(k)
yield [k, v]
class LMDBDataDecoder(MapData):
""" Read a LMDB database with a custom decoder and produce decoded outputs."""
def __init__(self, lmdb_data, decoder):
"""
Args:
lmdb_data: a :class:`LMDBData` instance.
decoder (k,v -> dp | None): a function taking k, v and returning a datapoint,
or return None to discard.
"""
def f(dp):
return decoder(dp[0], dp[1])
super(LMDBDataDecoder, self).__init__(lmdb_data, f)
def CaffeLMDB(lmdb_path, shuffle=True, keys=None):
"""
Read a Caffe-format LMDB file where each value contains a ``caffe.Datum`` protobuf.
Produces datapoints of the format: [HWC image, label].
Note that Caffe LMDB format is not efficient: it stores serialized raw
arrays rather than JPEG images.
Args:
lmdb_path, shuffle, keys: same as :class:`LMDBData`.
Example:
.. code-block:: python
ds = CaffeLMDB("/tmp/validation", keys='{:0>8d}')
"""
cpb = get_caffe_pb()
lmdb_data = LMDBData(lmdb_path, shuffle, keys)
def decoder(k, v):
try:
datum = cpb.Datum()
datum.ParseFromString(v)
img = np.fromstring(datum.data, dtype=np.uint8)
img = img.reshape(datum.channels, datum.height, datum.width)
except Exception:
log_once("Cannot read key {}".format(k), 'warn')
return None
return [img.transpose(1, 2, 0), datum.label]
logger.warn("Caffe LMDB format doesn't store jpeg-compressed images, \
it's not recommended due to its inferior performance.")
return LMDBDataDecoder(lmdb_data, decoder)
class SVMLightData(RNGDataFlow):
""" Read X,y from an SVMlight file, and produce [X_i, y_i] pairs. """
def __init__(self, filename, shuffle=True):
"""
Args:
filename (str): input file
shuffle (bool): shuffle the data
"""
import sklearn.datasets # noqa
self.X, self.y = sklearn.datasets.load_svmlight_file(filename)
self.X = np.asarray(self.X.todense())
self.shuffle = shuffle
def __len__(self):
return len(self.y)
def __iter__(self):
idxs = np.arange(self.__len__())
if self.shuffle:
self.rng.shuffle(idxs)
for id in idxs:
yield [self.X[id, :], self.y[id]]
try:
import h5py
except ImportError:
HDF5Data = create_dummy_class('HDF5Data', 'h5py') # noqa
try:
import lmdb
except ImportError:
for klass in ['LMDBData', 'LMDBDataDecoder', 'CaffeLMDB']:
globals()[klass] = create_dummy_class(klass, 'lmdb')
| 7,910 | 31.690083 | 102 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/parallel_map.py | # -*- coding: utf-8 -*-
# File: parallel_map.py
import copy
import ctypes
import multiprocessing as mp
import numpy as np
import threading
import zmq
from six.moves import queue
from ..utils.concurrency import StoppableThread, enable_death_signal
from ..utils.serialize import dumps_once as dumps, loads_once as loads
from .base import DataFlow, DataFlowReentrantGuard, ProxyDataFlow
from .common import RepeatedData, BatchData
from .parallel import _bind_guard, _get_pipe_name, _MultiProcessZMQDataFlow, _repeat_iter, _zmq_catch_error
__all__ = ['MultiThreadMapData',
'MultiProcessMapData', 'MultiProcessMapDataZMQ',
'MultiProcessMapAndBatchData', 'MultiProcessMapAndBatchDataZMQ']
class _ParallelMapData(ProxyDataFlow):
def __init__(self, ds, buffer_size, strict=False):
super(_ParallelMapData, self).__init__(ds)
assert buffer_size > 0, buffer_size
self._buffer_size = buffer_size
self._buffer_occupancy = 0 # actual #elements in buffer, only useful in strict mode
self._strict = strict
def reset_state(self):
super(_ParallelMapData, self).reset_state()
if not self._strict:
ds = RepeatedData(self.ds, -1)
else:
ds = self.ds
self._iter = ds.__iter__()
def _recv(self):
pass
def _send(self, dp):
pass
def _recv_filter_none(self):
ret = self._recv()
assert ret is not None, \
"[{}] Map function cannot return None when strict mode is used.".format(type(self).__name__)
return ret
def _fill_buffer(self, cnt=None):
if cnt is None:
cnt = self._buffer_size - self._buffer_occupancy
try:
for _ in range(cnt):
dp = next(self._iter)
self._send(dp)
except StopIteration:
raise RuntimeError(
"[{}] buffer_size cannot be larger than the size of the DataFlow when strict=True! "
"Please use a smaller buffer_size!".format(type(self).__name__))
self._buffer_occupancy += cnt
def get_data_non_strict(self):
for dp in self._iter:
self._send(dp)
ret = self._recv()
if ret is not None:
yield ret
def get_data_strict(self):
self._fill_buffer()
for dp in self._iter:
self._send(dp)
yield self._recv_filter_none()
self._iter = self.ds.__iter__() # refresh
# first clear the buffer, then fill
for k in range(self._buffer_size):
dp = self._recv_filter_none()
self._buffer_occupancy -= 1
if k == self._buffer_size - 1:
self._fill_buffer()
yield dp
def __iter__(self):
if self._strict:
yield from self.get_data_strict()
else:
yield from self.get_data_non_strict()
class MultiThreadMapData(_ParallelMapData):
"""
Same as :class:`MapData`, but start threads to run the mapping function.
This is useful when the mapping function is the bottleneck, but you don't
want to start processes for the entire dataflow pipeline.
The semantics of this class is **identical** to :class:`MapData` except for the ordering.
Threads run in parallel and can take different time to run the
mapping function. Therefore the order of datapoints won't be preserved.
When ``strict=True``, ``MultiThreadMapData(df, ...)``
is guaranteed to produce the exact set of data as ``MapData(df, ...)``,
if both are iterated until ``StopIteration``. But the produced data will have different ordering.
The behavior of strict mode is undefined if the given dataflow ``df`` is infinite.
When ``strict=False``, the data that's produced by ``MultiThreadMapData(df, ...)``
is a reordering of the data produced by ``RepeatedData(MapData(df, ...), -1)``.
In other words, first pass of ``MultiThreadMapData.__iter__`` may contain
datapoints from the second pass of ``df.__iter__``.
Note:
1. You should avoid starting many threads in your main process to reduce GIL contention.
The threads will only start in the process which calls :meth:`reset_state()`.
Therefore you can use ``MultiProcessRunnerZMQ(MultiThreadMapData(...), 1)``
to reduce GIL contention.
"""
class _Worker(StoppableThread):
def __init__(self, inq, outq, evt, map_func):
super(MultiThreadMapData._Worker, self).__init__(evt)
self.inq = inq
self.outq = outq
self.func = map_func
self.daemon = True
def run(self):
try:
while True:
dp = self.queue_get_stoppable(self.inq)
if self.stopped():
return
# cannot ignore None here. will lead to unsynced send/recv
obj = self.func(dp)
self.queue_put_stoppable(self.outq, obj)
except Exception:
if self.stopped():
pass # skip duplicated error messages
else:
raise
finally:
self.stop()
def __init__(self, ds, num_thread=None, map_func=None, *, buffer_size=200, strict=False):
"""
Args:
ds (DataFlow): the dataflow to map
num_thread (int): number of threads to use
map_func (callable): datapoint -> datapoint | None. Return None to
discard/skip the datapoint.
buffer_size (int): number of datapoints in the buffer
strict (bool): use "strict mode", see notes above.
"""
if strict:
# In strict mode, buffer size cannot be larger than the total number of datapoints
try:
buffer_size = min(buffer_size, len(ds))
except Exception: # ds may not have a length
pass
super(MultiThreadMapData, self).__init__(ds, buffer_size, strict)
assert num_thread > 0, num_thread
self._strict = strict
self.num_thread = num_thread
self.map_func = map_func
self._threads = []
self._evt = None
def reset_state(self):
super(MultiThreadMapData, self).reset_state()
if self._threads:
self._threads[0].stop()
for t in self._threads:
t.join()
self._in_queue = queue.Queue()
self._out_queue = queue.Queue()
self._evt = threading.Event()
self._threads = [MultiThreadMapData._Worker(
self._in_queue, self._out_queue, self._evt, self.map_func)
for _ in range(self.num_thread)]
for t in self._threads:
t.start()
self._guard = DataFlowReentrantGuard()
# Call once at the beginning, to ensure inq+outq has a total of buffer_size elements
self._fill_buffer()
def _recv(self):
return self._out_queue.get()
def _send(self, dp):
self._in_queue.put(dp)
def __iter__(self):
with self._guard:
yield from super(MultiThreadMapData, self).__iter__()
def __del__(self):
if self._evt is not None:
self._evt.set()
for p in self._threads:
p.stop()
p.join(timeout=5.0)
# if p.is_alive():
# logger.warn("Cannot join thread {}.".format(p.name))
class MultiProcessMapDataZMQ(_ParallelMapData, _MultiProcessZMQDataFlow):
"""
Same as :class:`MapData`, but start processes to run the mapping function,
and communicate with ZeroMQ pipe.
The semantics of this class is **identical** to :class:`MapData` except for the ordering.
Processes run in parallel and can take different time to run the
mapping function. Therefore the order of datapoints won't be preserved.
When ``strict=True``, ``MultiProcessMapData(df, ...)``
is guaranteed to produce the exact set of data as ``MapData(df, ...)``,
if both are iterated until ``StopIteration``. But the produced data will have different ordering.
The behavior of strict mode is undefined if the given dataflow ``df`` is infinite.
When ``strict=False``, the data that's produced by ``MultiProcessMapData(df, ...)``
is a reordering of the data produced by ``RepeatedData(MapData(df, ...), -1)``.
In other words, first pass of ``MultiProcessMapData.__iter__`` may contain
datapoints from the second pass of ``df.__iter__``.
"""
class _Worker(mp.Process):
def __init__(self, identity, map_func, pipename, hwm):
super(MultiProcessMapDataZMQ._Worker, self).__init__()
self.identity = identity
self.map_func = map_func
self.pipename = pipename
self.hwm = hwm
def run(self):
enable_death_signal(_warn=self.identity == b'0')
ctx = zmq.Context()
socket = ctx.socket(zmq.REP)
socket.setsockopt(zmq.IDENTITY, self.identity)
socket.set_hwm(self.hwm)
socket.connect(self.pipename)
while True:
dp = loads(socket.recv(copy=False))
dp = self.map_func(dp)
socket.send(dumps(dp), copy=False)
def __init__(self, ds, num_proc=None, map_func=None, *, buffer_size=200, strict=False):
"""
Args:
ds (DataFlow): the dataflow to map
            num_proc (int): number of processes to use
map_func (callable): datapoint -> datapoint | None. Return None to
discard/skip the datapoint.
buffer_size (int): number of datapoints in the buffer
strict (bool): use "strict mode", see notes above.
"""
if strict:
# In strict mode, buffer size cannot be larger than the total number of datapoints
try:
buffer_size = min(buffer_size, len(ds))
except Exception: # ds may not have a length
pass
_ParallelMapData.__init__(self, ds, buffer_size, strict)
_MultiProcessZMQDataFlow.__init__(self)
assert num_proc > 0, num_proc
self.num_proc = num_proc
self.map_func = map_func
self._strict = strict
self._procs = []
def _create_worker(self, id, pipename, hwm):
return MultiProcessMapDataZMQ._Worker(id, self.map_func, pipename, hwm)
def reset_state(self):
_MultiProcessZMQDataFlow.reset_state(self)
_ParallelMapData.reset_state(self)
self._guard = DataFlowReentrantGuard()
self.context = zmq.Context()
self.socket = self.context.socket(zmq.DEALER)
self.socket.set_hwm(self._buffer_size * 2)
pipename = _get_pipe_name('dataflow-map')
_bind_guard(self.socket, pipename)
self._proc_ids = [u'{}'.format(k).encode('utf-8') for k in range(self.num_proc)]
worker_hwm = int(self._buffer_size * 2 // self.num_proc)
self._procs = [self._create_worker(self._proc_ids[k], pipename, worker_hwm)
for k in range(self.num_proc)]
self._start_processes()
        self._fill_buffer()  # pre-fill the buffer
def _send(self, dp):
msg = [b"", dumps(dp)]
self.socket.send_multipart(msg, copy=False)
def _recv(self):
msg = self.socket.recv_multipart(copy=False)
dp = loads(msg[1])
return dp
def __iter__(self):
with self._guard, _zmq_catch_error(type(self).__name__):
yield from super(MultiProcessMapDataZMQ, self).__iter__()
class MultiProcessMapAndBatchDataZMQ(_MultiProcessZMQDataFlow):
"""
Similar to :class:`MultiProcessMapDataZMQ`, except that this DataFlow
also does batching in parallel in the worker processes.
Therefore it can be helpful if you wish to hide the latency of batching.
    When `num_proc==1`, the behavior of this class is identical to
`BatchData(MapData(ds, map_func), batch_size)`.
    When `num_proc>1`, the datapoints may be grouped in arbitrary order,
or grouped with datapoints from a different pass of the given dataflow.
"""
class _Dispatcher(mp.Process):
def __init__(self, ds, pipename, hwm):
super(MultiProcessMapAndBatchDataZMQ._Dispatcher, self).__init__()
self.ds = RepeatedData(ds, -1)
self.pipename = pipename
self.hwm = hwm
def run(self):
enable_death_signal()
ctx = zmq.Context()
socket = ctx.socket(zmq.PUSH)
socket.set_hwm(self.hwm)
socket.bind(self.pipename)
self.ds.reset_state()
for dp in self.ds:
socket.send(dumps(dp), copy=False)
class _Worker(mp.Process):
def __init__(self, identity, map_func, input_pipe, result_pipe, hwm, batch_size):
super(MultiProcessMapAndBatchDataZMQ._Worker, self).__init__()
self.identity = identity
self.map_func = map_func
self.input_pipe = input_pipe
self.result_pipe = result_pipe
self.hwm = hwm
self.batch_size = batch_size
def run(self):
enable_death_signal(_warn=self.identity == b'0')
ctx = zmq.Context()
# recv jobs
socket = ctx.socket(zmq.PULL)
socket.setsockopt(zmq.IDENTITY, self.identity)
socket.set_hwm(self.hwm * self.batch_size)
socket.connect(self.input_pipe)
# send results
out_socket = ctx.socket(zmq.PUSH)
out_socket.set_hwm(max(self.hwm, 5))
out_socket.connect(self.result_pipe)
batch = []
while True:
dp = loads(socket.recv(copy=False))
dp = self.map_func(dp)
if dp is not None:
batch.append(dp)
if len(batch) == self.batch_size:
dp = BatchData.aggregate_batch(batch)
out_socket.send(dumps(dp), copy=False)
del batch[:]
def __init__(self, ds, num_proc, map_func, batch_size, buffer_size=None):
"""
Args:
ds (DataFlow): the dataflow to map
            num_proc (int): number of processes to use
map_func (callable): datapoint -> datapoint | None. Return None to
discard/skip the datapoint.
batch_size (int): batch size
buffer_size (int): number of datapoints (not batched) in the buffer.
Defaults to batch_size * 10
"""
super(MultiProcessMapAndBatchDataZMQ, self).__init__()
        self.ds = ds
        self.num_proc = num_proc
        self.map_func = map_func
        self.batch_size = batch_size
        if buffer_size is None:
            buffer_size = batch_size * 10
        assert batch_size < buffer_size, "batch_size must be smaller than buffer_size!"
        self.buffer_size = buffer_size
def reset_state(self):
_MultiProcessZMQDataFlow.reset_state(self)
self._guard = DataFlowReentrantGuard()
job_pipe = _get_pipe_name("dataflow_MaB_job")
result_pipe = _get_pipe_name("dataflow_MaB_result")
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PULL)
self.socket.set_hwm(max(5, self.buffer_size // self.batch_size))
_bind_guard(self.socket, result_pipe)
dispatcher = MultiProcessMapAndBatchDataZMQ._Dispatcher(self.ds, job_pipe, self.buffer_size)
self._proc_ids = [u'{}'.format(k).encode('utf-8') for k in range(self.num_proc)]
worker_hwm = max(3, self.buffer_size // self.num_proc // self.batch_size)
self._procs = [MultiProcessMapAndBatchDataZMQ._Worker(
self._proc_ids[k], self.map_func, job_pipe, result_pipe, worker_hwm, self.batch_size)
for k in range(self.num_proc)]
self._procs.append(dispatcher)
self._start_processes()
def __iter__(self):
with self._guard, _zmq_catch_error(type(self).__name__):
while True:
yield loads(self.socket.recv(copy=False))
def _pool_map(data):
global SHARED_ARR, WORKER_ID, MAP_FUNC
res = MAP_FUNC(data)
if res is None:
return None
shared = np.reshape(SHARED_ARR, res.shape)
assert shared.dtype == res.dtype
shared[:] = res
return WORKER_ID
# TODO shutdown pool, improve speed.
class MultiProcessMapDataComponentSharedArray(DataFlow):
"""
Similar to :class:`MapDataComponent`, but perform IPC by shared memory,
therefore more efficient when data (result of map_func) is large.
It requires `map_func` to always return a numpy array of fixed shape and dtype, or None.
"""
def __init__(self, ds, nr_proc, map_func, output_shape, output_dtype, index=0):
"""
Args:
ds (DataFlow): the dataflow to map on
nr_proc(int): number of processes
map_func (data component -> ndarray | None): the mapping function
output_shape (tuple): the shape of the output of map_func
output_dtype (np.dtype): the type of the output of map_func
index (int): the index of the datapoint component to map on.
"""
self.ds = ds
self.nr_proc = nr_proc
self.map_func = map_func
self.output_shape = output_shape
self.output_dtype = np.dtype(output_dtype).type
self.index = index
self._shared_mem = [self._create_shared_arr() for k in range(nr_proc)]
id_queue = mp.Queue()
for k in range(nr_proc):
id_queue.put(k)
def _init_pool(arrs, queue, map_func):
id = queue.get()
global SHARED_ARR, WORKER_ID, MAP_FUNC
SHARED_ARR = arrs[id]
WORKER_ID = id
MAP_FUNC = map_func
self._pool = mp.pool.Pool(
processes=nr_proc,
initializer=_init_pool,
initargs=(self._shared_mem, id_queue, map_func))
def _create_shared_arr(self):
TYPE = {
np.float32: ctypes.c_float,
np.float64: ctypes.c_double,
np.uint8: ctypes.c_uint8,
np.int8: ctypes.c_int8,
np.int32: ctypes.c_int32,
}
ctype = TYPE[self.output_dtype]
arr = mp.RawArray(ctype, int(np.prod(self.output_shape)))
return arr
def __len__(self):
return len(self.ds)
def reset_state(self):
self.ds.reset_state()
self._guard = DataFlowReentrantGuard()
def __iter__(self):
ds_itr = _repeat_iter(self.ds.get_data)
with self._guard:
while True:
dps = []
for k in range(self.nr_proc):
dps.append(copy.copy(next(ds_itr)))
to_map = [x[self.index] for x in dps]
res = self._pool.map_async(_pool_map, to_map)
for index in res.get():
if index is None:
continue
arr = np.reshape(self._shared_mem[index], self.output_shape)
dp = dps[index]
dp[self.index] = arr.copy()
yield dp
# alias
MultiProcessMapData = MultiProcessMapDataZMQ
MultiProcessMapAndBatchData = MultiProcessMapAndBatchDataZMQ
if __name__ == '__main__':
import time
class Zero(DataFlow):
def __init__(self, size):
self._size = size
def __iter__(self):
for k in range(self._size):
yield [k]
def __len__(self):
return self._size
def f(x):
if x[0] < 10:
time.sleep(1)
return x
ds = Zero(100)
ds = MultiThreadMapData(ds, 50, f, buffer_size=50, strict=True)
ds.reset_state()
for idx, k in enumerate(ds):
print("Bang!", k)
if idx == 100:
break
print("END!")
| 20,029 | 35.352087 | 107 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/common.py | # -*- coding: utf-8 -*-
# File: common.py
from __future__ import division
import itertools
import numpy as np
import pprint
from collections import defaultdict, deque
from copy import copy
import six
import tqdm
from termcolor import colored
from ..utils import logger
from ..utils.utils import get_rng, get_tqdm, get_tqdm_kwargs
from .base import DataFlow, DataFlowReentrantGuard, ProxyDataFlow, RNGDataFlow
try:
from collections.abc import Mapping
except ImportError:
from collections import Mapping
__all__ = ['TestDataSpeed', 'PrintData', 'BatchData', 'BatchDataByShape', 'FixedSizeData', 'MapData',
'MapDataComponent', 'RepeatedData', 'RepeatedDataPoint', 'RandomChooseData',
'RandomMixData', 'JoinData', 'ConcatData', 'SelectComponent',
'LocallyShuffleData', 'CacheData']
class TestDataSpeed(ProxyDataFlow):
""" Test the speed of a DataFlow """
def __init__(self, ds, size=5000, warmup=0):
"""
Args:
ds (DataFlow): the DataFlow to test.
size (int): number of datapoints to fetch.
warmup (int): warmup iterations
"""
super(TestDataSpeed, self).__init__(ds)
self.test_size = int(size)
self.warmup = int(warmup)
self._reset_called = False
def reset_state(self):
self._reset_called = True
super(TestDataSpeed, self).reset_state()
def __iter__(self):
""" Will run testing at the beginning, then produce data normally. """
self.start()
yield from self.ds
def start(self):
"""
Start testing with a progress bar.
"""
if not self._reset_called:
self.ds.reset_state()
itr = self.ds.__iter__()
if self.warmup:
for _ in tqdm.trange(self.warmup, **get_tqdm_kwargs()):
next(itr)
# add smoothing for speed benchmark
with get_tqdm(total=self.test_size,
leave=True, smoothing=0.2) as pbar:
for idx, dp in enumerate(itr):
pbar.update()
if idx == self.test_size - 1:
break
class BatchData(ProxyDataFlow):
"""
Stack datapoints into batches.
It produces datapoints of the same number of components as ``ds``, but
each component has one new extra dimension of size ``batch_size``.
The batch can be either a list of original components, or (by default)
a numpy array of original components.
"""
def __init__(self, ds, batch_size, remainder=False, use_list=False):
"""
Args:
ds (DataFlow): A dataflow that produces either list or dict.
When ``use_list=False``, the components of ``ds``
must be either scalars or :class:`np.ndarray`, and have to be consistent in shapes.
batch_size(int): batch size
remainder (bool): When the remaining datapoints in ``ds`` is not
enough to form a batch, whether or not to also produce the remaining
data as a smaller batch.
If set to False, all produced datapoints are guaranteed to have the same batch size.
If set to True, `len(ds)` must be accurate.
use_list (bool): if True, each component will contain a list
of datapoints instead of an numpy array of an extra dimension.
"""
super(BatchData, self).__init__(ds)
if not remainder:
try:
assert batch_size <= len(ds)
except NotImplementedError:
pass
self.batch_size = int(batch_size)
assert self.batch_size > 0
self.remainder = remainder
self.use_list = use_list
def __len__(self):
ds_size = len(self.ds)
div = ds_size // self.batch_size
rem = ds_size % self.batch_size
if rem == 0:
return div
return div + int(self.remainder)
def __iter__(self):
"""
Yields:
Batched data by stacking each component on an extra 0th dimension.
"""
holder = []
for data in self.ds:
holder.append(data)
if len(holder) == self.batch_size:
yield BatchData.aggregate_batch(holder, self.use_list)
del holder[:]
if self.remainder and len(holder) > 0:
yield BatchData.aggregate_batch(holder, self.use_list)
@staticmethod
def _batch_numpy(data_list):
data = data_list[0]
        # Check bool before integers: bool is a subclass of int, so the integer
        # check below would otherwise batch booleans as 'int32'.
        if type(data) == bool:
            dtype = 'bool'
        elif isinstance(data, six.integer_types):
            dtype = 'int32'
elif type(data) == float:
dtype = 'float32'
elif isinstance(data, (six.binary_type, six.text_type)):
dtype = 'str'
else:
try:
dtype = data.dtype
except AttributeError:
raise TypeError("Unsupported type to batch: {}".format(type(data)))
try:
return np.asarray(data_list, dtype=dtype)
except Exception as e: # noqa
logger.exception("Cannot batch data. Perhaps they are of inconsistent shape?")
if isinstance(data, np.ndarray):
s = pprint.pformat([x.shape for x in data_list])
logger.error("Shape of all arrays to be batched: " + s)
try:
# open an ipython shell if possible
import IPython as IP; IP.embed() # noqa
except ImportError:
pass
@staticmethod
def aggregate_batch(data_holder, use_list=False):
"""
Aggregate a list of datapoints to one batched datapoint.
Args:
data_holder (list[dp]): each dp is either a list or a dict.
use_list (bool): whether to batch data into a list or a numpy array.
Returns:
dp:
either a list or a dict, depend on the inputs.
Each item is a batched version of the corresponding inputs.
"""
first_dp = data_holder[0]
if isinstance(first_dp, (list, tuple)):
result = []
for k in range(len(first_dp)):
data_list = [x[k] for x in data_holder]
if use_list:
result.append(data_list)
else:
result.append(BatchData._batch_numpy(data_list))
elif isinstance(first_dp, dict):
result = {}
for key in first_dp.keys():
data_list = [x[key] for x in data_holder]
if use_list:
result[key] = data_list
else:
result[key] = BatchData._batch_numpy(data_list)
else:
raise ValueError("Data point has to be list/tuple/dict. Got {}".format(type(first_dp)))
return result
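# Usage sketch: assuming `ds0` is a placeholder DataFlow yielding [image, label]
# datapoints with consistent image shapes, BatchData stacks them along a new
# 0th dimension; remainder=True keeps the final smaller batch:
#
#   ds = BatchData(ds0, batch_size=32, remainder=True)
#   ds.reset_state()
#   for images, labels in ds:
#       pass  # images.shape[0] == 32, except possibly for the last batch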
class BatchDataByShape(BatchData):
"""
Group datapoints of the same shape together to batches.
It doesn't require input DataFlow to be homogeneous anymore: it can have
datapoints of different shape, and batches will be formed from those who
have the same shape.
Note:
It is implemented by a dict{shape -> datapoints}.
Therefore, datapoints of uncommon shapes may never be enough to form a batch and
never get generated.
"""
def __init__(self, ds, batch_size, idx):
"""
Args:
ds (DataFlow): input DataFlow. ``dp[idx]`` has to be an :class:`np.ndarray`.
batch_size (int): batch size
idx (int): ``dp[idx].shape`` will be used to group datapoints.
Other components are assumed to be batch-able.
"""
super(BatchDataByShape, self).__init__(ds, batch_size, remainder=False)
self.idx = idx
def reset_state(self):
super(BatchDataByShape, self).reset_state()
self.holder = defaultdict(list)
self._guard = DataFlowReentrantGuard()
def __iter__(self):
with self._guard:
for dp in self.ds:
shp = dp[self.idx].shape
holder = self.holder[shp]
holder.append(dp)
if len(holder) == self.batch_size:
yield BatchData.aggregate_batch(holder)
del holder[:]
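# Usage sketch: assuming `ds0` yields [image, label] where images may differ in
# shape, batches of 16 are formed only among datapoints whose component-0 shapes match:
#
#   ds = BatchDataByShape(ds0, batch_size=16, idx=0)
#   ds.reset_state()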
class FixedSizeData(ProxyDataFlow):
""" Generate data from another DataFlow, but with a fixed total count.
"""
def __init__(self, ds, size, keep_state=True):
"""
Args:
ds (DataFlow): input dataflow
size (int): size
keep_state (bool): keep the iterator state of ``ds``
between calls to :meth:`__iter__()`, so that the
next call will continue the previous iteration over ``ds``,
instead of reinitializing an iterator.
Example:
.. code-block:: none
ds produces: 1, 2, 3, 4, 5; 1, 2, 3, 4, 5; ...
FixedSizeData(ds, 3, True): 1, 2, 3; 4, 5, 1; 2, 3, 4; ...
FixedSizeData(ds, 3, False): 1, 2, 3; 1, 2, 3; ...
FixedSizeData(ds, 6, False): 1, 2, 3, 4, 5, 1; 1, 2, 3, 4, 5, 1;...
"""
super(FixedSizeData, self).__init__(ds)
self._size = int(size)
self.itr = None
self._keep = keep_state
def __len__(self):
return self._size
def reset_state(self):
super(FixedSizeData, self).reset_state()
self.itr = self.ds.__iter__()
self._guard = DataFlowReentrantGuard()
def __iter__(self):
with self._guard:
if self.itr is None:
self.itr = self.ds.__iter__()
cnt = 0
while True:
try:
dp = next(self.itr)
except StopIteration:
self.itr = self.ds.__iter__()
dp = next(self.itr)
cnt += 1
yield dp
if cnt == self._size:
if not self._keep:
self.itr = None
return
class MapData(ProxyDataFlow):
"""
Apply a mapper/filter on the datapoints of a DataFlow.
Note:
1. Please make sure func doesn't modify its arguments in place,
unless you're certain it's safe.
2. If you discard some datapoints, ``len(MapData(ds))`` will be incorrect.
Example:
.. code-block:: none
ds = Mnist('train') # each datapoint is [img, label]
ds = MapData(ds, lambda dp: [dp[0] * 255, dp[1]])
"""
def __init__(self, ds, func):
"""
Args:
ds (DataFlow): input DataFlow
func (datapoint -> datapoint | None): takes a datapoint and returns a new
datapoint. Return None to discard/skip this datapoint.
"""
super(MapData, self).__init__(ds)
self.func = func
def __iter__(self):
for dp in self.ds:
ret = self.func(copy(dp)) # shallow copy the list
if ret is not None:
yield ret
class MapDataComponent(MapData):
"""
Apply a mapper/filter on a datapoint component.
Note:
1. This dataflow itself doesn't modify the datapoints.
But please make sure func doesn't modify its arguments in place,
unless you're certain it's safe.
2. If you discard some datapoints, ``len(MapDataComponent(ds, ..))`` will be incorrect.
Example:
.. code-block:: none
ds = Mnist('train') # each datapoint is [img, label]
ds = MapDataComponent(ds, lambda img: img * 255, 0) # map the 0th component
"""
def __init__(self, ds, func, index=0):
"""
Args:
ds (DataFlow): input DataFlow which produces either list or dict.
func (TYPE -> TYPE|None): takes ``dp[index]``, returns a new value for ``dp[index]``.
Return None to discard/skip this datapoint.
index (int or str): index or key of the component.
"""
self._index = index
self._func = func
super(MapDataComponent, self).__init__(ds, self._mapper)
def _mapper(self, dp):
r = self._func(dp[self._index])
if r is None:
return None
dp = copy(dp) # shallow copy to avoid modifying the datapoint
if isinstance(dp, tuple):
dp = list(dp) # to be able to modify it in the next line
dp[self._index] = r
return dp
class RepeatedData(ProxyDataFlow):
""" Take data points from another DataFlow and produce them until
    it has been exhausted a certain number of times. i.e.:
dp1, dp2, .... dpn, dp1, dp2, ....dpn
"""
def __init__(self, ds, num):
"""
Args:
ds (DataFlow): input DataFlow
num (int): number of times to repeat ds.
Set to -1 to repeat ``ds`` infinite times.
"""
self.num = num
super(RepeatedData, self).__init__(ds)
def __len__(self):
"""
Raises:
            :class:`NotImplementedError` when num == -1.
"""
if self.num == -1:
raise NotImplementedError("__len__() is unavailable for infinite dataflow")
return len(self.ds) * self.num
def __iter__(self):
if self.num == -1:
while True:
yield from self.ds
else:
for _ in range(self.num):
yield from self.ds
class RepeatedDataPoint(ProxyDataFlow):
""" Take data points from another DataFlow and produce them a
certain number of times. i.e.:
dp1, dp1, ..., dp1, dp2, ..., dp2, ...
"""
def __init__(self, ds, num):
"""
Args:
ds (DataFlow): input DataFlow
num (int): number of times to repeat each datapoint.
"""
self.num = int(num)
assert self.num >= 1, self.num
super(RepeatedDataPoint, self).__init__(ds)
def __len__(self):
return len(self.ds) * self.num
def __iter__(self):
for dp in self.ds:
for _ in range(self.num):
yield dp
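# Usage sketch: assuming `ds0` is a finite placeholder DataFlow, the two classes
# above repeat either the whole stream or each individual datapoint:
#
#   infinite = RepeatedData(ds0, -1)        # dp1 ... dpn, dp1 ... dpn, ...
#   tripled = RepeatedDataPoint(ds0, 3)     # dp1, dp1, dp1, dp2, dp2, dp2, ...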
class RandomChooseData(RNGDataFlow):
"""
Randomly choose from several DataFlow.
Stop producing when any of them is exhausted.
"""
def __init__(self, df_lists):
"""
Args:
df_lists (list): a list of DataFlow, or a list of (DataFlow, probability) tuples.
Probabilities must sum to 1 if used.
"""
super(RandomChooseData, self).__init__()
if isinstance(df_lists[0], (tuple, list)):
assert sum(v[1] for v in df_lists) == 1.0
self.df_lists = df_lists
else:
prob = 1.0 / len(df_lists)
self.df_lists = [(k, prob) for k in df_lists]
def reset_state(self):
super(RandomChooseData, self).reset_state()
for d in self.df_lists:
if isinstance(d, tuple):
d[0].reset_state()
else:
d.reset_state()
def __iter__(self):
itrs = [v[0].__iter__() for v in self.df_lists]
probs = np.array([v[1] for v in self.df_lists])
try:
while True:
itr = self.rng.choice(itrs, p=probs)
yield next(itr)
except StopIteration:
return
class RandomMixData(RNGDataFlow):
"""
Perfectly mix datapoints from several DataFlow using their
    :meth:`__len__()`. Will stop when all DataFlows are exhausted.
"""
def __init__(self, df_lists):
"""
Args:
df_lists (list): a list of DataFlow.
All DataFlow must implement ``__len__()``.
"""
super(RandomMixData, self).__init__()
self.df_lists = df_lists
self.sizes = [len(k) for k in self.df_lists]
def reset_state(self):
super(RandomMixData, self).reset_state()
for d in self.df_lists:
d.reset_state()
def __len__(self):
return sum(self.sizes)
def __iter__(self):
sums = np.cumsum(self.sizes)
idxs = np.arange(self.__len__())
self.rng.shuffle(idxs)
idxs = np.array(list(map(
lambda x: np.searchsorted(sums, x, 'right'), idxs)))
itrs = [k.__iter__() for k in self.df_lists]
assert idxs.max() == len(itrs) - 1, "{}!={}".format(idxs.max(), len(itrs) - 1)
for k in idxs:
yield next(itrs[k])
# TODO run till exception
class ConcatData(DataFlow):
"""
Concatenate several DataFlow.
Produce datapoints from each DataFlow and start the next when one
DataFlow is exhausted.
"""
def __init__(self, df_lists):
"""
Args:
df_lists (list): a list of DataFlow.
"""
self.df_lists = df_lists
def reset_state(self):
for d in self.df_lists:
d.reset_state()
def __len__(self):
return sum(len(x) for x in self.df_lists)
def __iter__(self):
for d in self.df_lists:
yield from d
class JoinData(DataFlow):
"""
Join the components from each DataFlow. See below for its behavior.
Note that you can't join a DataFlow that produces lists with one that produces dicts.
Example:
.. code-block:: none
df1 produces: [c1, c2]
df2 produces: [c3, c4]
joined: [c1, c2, c3, c4]
df1 produces: {"a":c1, "b":c2}
df2 produces: {"c":c3}
joined: {"a":c1, "b":c2, "c":c3}
"""
def __init__(self, df_lists):
"""
Args:
df_lists (list): a list of DataFlow.
When these dataflows have different sizes, JoinData will stop when any
of them is exhausted.
The list could contain the same DataFlow instance more than once,
but note that in that case `__iter__` will then also be called many times.
"""
self.df_lists = df_lists
try:
self._size = len(self.df_lists[0])
for d in self.df_lists:
assert len(d) == self._size, \
"All DataFlow must have the same size! {} != {}".format(len(d), self._size)
except Exception:
logger.info("[JoinData] Size check failed for the list of dataflow to be joined!")
def reset_state(self):
for d in set(self.df_lists):
d.reset_state()
def __len__(self):
"""
Return the minimum size among all.
"""
return min(len(k) for k in self.df_lists)
def __iter__(self):
itrs = [k.__iter__() for k in self.df_lists]
try:
while True:
all_dps = [next(itr) for itr in itrs]
if isinstance(all_dps[0], (list, tuple)):
dp = list(itertools.chain(*all_dps))
else:
dp = {}
for x in all_dps:
dp.update(x)
yield dp
except StopIteration: # some of them are exhausted
pass
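# Usage sketch (all names are placeholders): assuming `df_img` yields [image] and
# `df_lbl` yields [label] with equal sizes, JoinData merges the components of each
# datapoint, while ConcatData chains complete dataflows one after another:
#
#   joined = JoinData([df_img, df_lbl])          # yields [image, label]
#   chained = ConcatData([df_part1, df_part2])   # df_part1 first, then df_part2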
def SelectComponent(ds, idxs):
"""
Select / reorder components from datapoints.
Args:
ds (DataFlow): input DataFlow.
idxs (list[int] or list[str]): a list of component indices/keys.
Example:
.. code-block:: none
original df produces: [c1, c2, c3]
idxs: [2,1]
this df: [c3, c2]
"""
return MapData(ds, lambda dp: [dp[i] for i in idxs])
class LocallyShuffleData(ProxyDataFlow, RNGDataFlow):
""" Buffer the datapoints from a given dataflow, and shuffle them before producing them.
This can be used as an alternative when a complete random shuffle is too expensive
or impossible for the data source.
This dataflow has the following behavior:
1. It takes datapoints from the given dataflow `ds` to an internal buffer of fixed size.
Each datapoint is duplicated for `num_reuse` times.
2. Once the buffer is full, this dataflow starts to yield data from the beginning of the buffer,
and new datapoints will be added to the end of the buffer. This is like a FIFO queue.
3. The internal buffer is shuffled after every `shuffle_interval` datapoints that come from `ds`.
To maintain shuffling states, this dataflow is not reentrant.
Datapoints from one pass of `ds` will get mixed with datapoints from a different pass.
As a result, the iterator of this dataflow will run indefinitely
because it does not make sense to stop the iteration anywhere.
"""
def __init__(self, ds, buffer_size, num_reuse=1, shuffle_interval=None):
"""
Args:
ds (DataFlow): input DataFlow.
buffer_size (int): size of the buffer.
            num_reuse (int): duplicate each datapoint several times into the buffer to improve
speed, but duplication may hurt your model.
shuffle_interval (int): shuffle the buffer after this many
datapoints were produced from the given dataflow. Frequent shuffle on large buffer
may affect speed, but infrequent shuffle may not provide enough randomness.
Defaults to buffer_size / 3
"""
ProxyDataFlow.__init__(self, ds)
self.q = deque(maxlen=buffer_size)
if shuffle_interval is None:
shuffle_interval = int(buffer_size // 3)
self.shuffle_interval = shuffle_interval
self.num_reuse = num_reuse
self._inf_ds = RepeatedData(ds, -1)
def reset_state(self):
self._guard = DataFlowReentrantGuard()
ProxyDataFlow.reset_state(self)
RNGDataFlow.reset_state(self)
self._iter_cnt = 0
self._inf_iter = iter(self._inf_ds)
def __len__(self):
return len(self.ds) * self.num_reuse
def __iter__(self):
with self._guard:
for dp in self._inf_iter:
self._iter_cnt = (self._iter_cnt + 1) % self.shuffle_interval
# fill queue
if self._iter_cnt == 0:
self.rng.shuffle(self.q)
for _ in range(self.num_reuse):
if self.q.maxlen == len(self.q):
yield self.q.popleft()
self.q.append(dp)
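# Usage sketch: assuming `ds0` reads sequentially (e.g. from an LMDB file) and a
# full shuffle is too expensive, a buffer of 10000 datapoints gives approximate
# shuffling; note the resulting iterator never stops by itself:
#
#   ds = LocallyShuffleData(ds0, buffer_size=10000)
#   ds.reset_state()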
class CacheData(ProxyDataFlow):
"""
Completely cache the first pass of a DataFlow in memory,
and produce from the cache thereafter.
NOTE: The user should not stop the iterator before it has reached the end.
Otherwise the cache may be incomplete.
"""
def __init__(self, ds, shuffle=False):
"""
Args:
ds (DataFlow): input DataFlow.
shuffle (bool): whether to shuffle the cache before yielding from it.
"""
self.shuffle = shuffle
super(CacheData, self).__init__(ds)
def reset_state(self):
super(CacheData, self).reset_state()
self._guard = DataFlowReentrantGuard()
if self.shuffle:
self.rng = get_rng(self)
self.buffer = []
def __iter__(self):
with self._guard:
if len(self.buffer):
if self.shuffle:
self.rng.shuffle(self.buffer)
yield from self.buffer
else:
for dp in self.ds:
yield dp
self.buffer.append(dp)
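# Usage sketch: assuming `ds0` is slow to produce but small enough to fit in
# memory, the first epoch fills the cache and later epochs are served
# (optionally reshuffled) from memory:
#
#   ds = CacheData(ds0, shuffle=True)
#   ds.reset_state()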
class PrintData(ProxyDataFlow):
"""
Behave like an identity proxy, but print shape and range of the first few datapoints.
Good for debugging.
Example:
Place it somewhere in your dataflow like
.. code-block:: python
def create_my_dataflow():
ds = SomeDataSource('path/to/lmdb')
ds = SomeInscrutableMappings(ds)
ds = PrintData(ds, num=2, max_list=2)
return ds
ds = create_my_dataflow()
# other code that uses ds
When datapoints are taken from the dataflow, it will print outputs like:
.. code-block:: none
[0110 09:22:21 @common.py:589] DataFlow Info:
        datapoint 0/2 with 4 components consists of
0: float with value 0.0816501893251
1: ndarray:int32 of shape (64,) in range [0, 10]
2: ndarray:float32 of shape (64, 64) in range [-1.2248, 1.2177]
3: list of len 50
0: ndarray:int32 of shape (64, 64) in range [-128, 80]
              1: ndarray:float32 of shape (64, 64) in range [0.6845, 0.8400]
...
        datapoint 1/2 with 4 components consists of
0: float with value 5.88252075399
1: ndarray:int32 of shape (64,) in range [0, 10]
           2: ndarray:float32 of shape (64, 64) in range [-0.9011, 0.8491]
3: list of len 50
0: ndarray:int32 of shape (64, 64) in range [-70, 50]
              1: ndarray:float32 of shape (64, 64) in range [0.3545, 0.7400]
...
"""
def __init__(self, ds, num=1, name=None, max_depth=3, max_list=3):
"""
Args:
ds (DataFlow): input DataFlow.
            num (int): number of datapoints to print.
name (str, optional): name to identify this DataFlow.
max_depth (int, optional): stop output when too deep recursion in sub elements
max_list (int, optional): stop output when too many sub elements
"""
super(PrintData, self).__init__(ds)
self.num = num
self.name = name
self.cnt = 0
self.max_depth = max_depth
self.max_list = max_list
def _analyze_input_data(self, entry, k, depth=1, max_depth=3, max_list=3):
"""
Gather useful debug information from a datapoint.
Args:
entry: the datapoint component, either a list or a dict
k (int): index of this component in current datapoint
depth (int, optional): recursion depth
max_depth, max_list: same as in :meth:`__init__`.
Returns:
string: debug message
"""
class _elementInfo(object):
def __init__(self, el, pos, depth=0, max_list=3):
self.shape = ""
self.type = type(el).__name__
self.dtype = ""
self.range = ""
self.sub_elements = []
self.ident = " " * (depth * 2)
self.pos = pos
numpy_scalar_types = list(itertools.chain(*np.sctypes.values()))
if isinstance(el, (int, float, bool)):
self.range = " with value {}".format(el)
elif type(el) is np.ndarray:
self.shape = " of shape {}".format(el.shape)
self.dtype = ":{}".format(str(el.dtype))
self.range = " in range [{}, {}]".format(el.min(), el.max())
elif type(el) in numpy_scalar_types:
self.range = " with value {}".format(el)
elif isinstance(el, (list, tuple)):
self.shape = " of len {}".format(len(el))
if depth < max_depth:
for k, subel in enumerate(el):
if k < max_list:
self.sub_elements.append(_elementInfo(subel, k, depth + 1, max_list))
else:
self.sub_elements.append(" " * ((depth + 1) * 2) + '...')
break
else:
if len(el) > 0:
self.sub_elements.append(" " * ((depth + 1) * 2) + ' ...')
def __str__(self):
strings = []
vals = (self.ident, self.pos, self.type, self.dtype, self.shape, self.range)
strings.append("{}{}: {}{}{}{}".format(*vals))
for k, el in enumerate(self.sub_elements):
strings.append(str(el))
return "\n".join(strings)
return str(_elementInfo(entry, k, depth, max_list))
def _get_msg(self, dp):
msg = [colored(u"datapoint %i/%i with %i components consists of" %
(self.cnt, self.num, len(dp)), "cyan")]
is_dict = isinstance(dp, Mapping)
for k, entry in enumerate(dp):
if is_dict:
key, value = entry, dp[entry]
else:
key, value = k, entry
msg.append(self._analyze_input_data(value, key, max_depth=self.max_depth, max_list=self.max_list))
return u'\n'.join(msg)
def __iter__(self):
for dp in self.ds:
# it is important to place this here! otherwise it mixes the output of multiple PrintData
if self.cnt == 0:
label = ' (%s)' % self.name if self.name is not None else ""
logger.info(colored("Contents of DataFlow%s:" % label, 'cyan'))
if self.cnt < self.num:
print(self._get_msg(dp))
self.cnt += 1
yield dp
def reset_state(self):
super(PrintData, self).reset_state()
self.cnt = 0
| 29,348 | 33.568905 | 110 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/serialize_test.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import tempfile
import numpy as np
import os
import unittest
from tensorpack.dataflow import HDF5Serializer, LMDBSerializer, NumpySerializer, TFRecordSerializer
from tensorpack.dataflow.base import DataFlow
def delete_file_if_exists(fn):
try:
os.remove(fn)
except OSError:
pass
class SeededFakeDataFlow(DataFlow):
"""docstring for SeededFakeDataFlow"""
def __init__(self, seed=42, size=32):
super(SeededFakeDataFlow, self).__init__()
self.seed = seed
self._size = size
self.cache = []
def reset_state(self):
np.random.seed(self.seed)
for _ in range(self._size):
label = np.random.randint(low=0, high=10)
img = np.random.randn(28, 28, 3)
self.cache.append([label, img])
def __len__(self):
return self._size
def __iter__(self):
for dp in self.cache:
yield dp
class SerializerTest(unittest.TestCase):
def run_write_read_test(self, file, serializer, w_args, w_kwargs, r_args, r_kwargs, error_msg):
try:
delete_file_if_exists(file)
ds_expected = SeededFakeDataFlow()
serializer.save(ds_expected, file, *w_args, **w_kwargs)
ds_actual = serializer.load(file, *r_args, **r_kwargs)
ds_actual.reset_state()
ds_expected.reset_state()
for dp_expected, dp_actual in zip(ds_expected.__iter__(), ds_actual.__iter__()):
self.assertEqual(dp_expected[0], dp_actual[0])
self.assertTrue(np.allclose(dp_expected[1], dp_actual[1]))
except ImportError:
print(error_msg)
def test_lmdb(self):
with tempfile.TemporaryDirectory() as f:
self.run_write_read_test(
os.path.join(f, 'test.lmdb'),
LMDBSerializer,
{}, {},
{}, {'shuffle': False},
'Skip test_lmdb, no lmdb available')
def test_tfrecord(self):
with tempfile.TemporaryDirectory() as f:
self.run_write_read_test(
os.path.join(f, 'test.tfrecord'),
TFRecordSerializer,
{}, {},
{}, {'size': 32},
'Skip test_tfrecord, no tensorflow available')
def test_numpy(self):
with tempfile.TemporaryDirectory() as f:
self.run_write_read_test(
os.path.join(f, 'test.npz'),
NumpySerializer,
{}, {},
{}, {'shuffle': False},
'Skip test_numpy, no numpy available')
def test_hdf5(self):
args = [['label', 'image']]
with tempfile.TemporaryDirectory() as f:
self.run_write_read_test(
os.path.join(f, 'test.h5'),
HDF5Serializer,
args, {},
args, {'shuffle': False},
'Skip test_hdf5, no h5py available')
if __name__ == '__main__':
unittest.main()
| 3,052 | 28.640777 | 99 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/raw.py | # -*- coding: utf-8 -*-
# File: raw.py
import copy
import numpy as np
import six
from .base import DataFlow, RNGDataFlow
__all__ = ['FakeData', 'DataFromQueue', 'DataFromList', 'DataFromGenerator', 'DataFromIterable']
class FakeData(RNGDataFlow):
""" Generate fake data of given shapes"""
def __init__(self, shapes, size=1000, random=True, dtype='float32', domain=(0, 1)):
"""
Args:
shapes (list): a list of lists/tuples. Shapes of each component.
size (int): size of this DataFlow.
random (bool): whether to randomly generate data every iteration.
Note that merely generating the data could sometimes be time-consuming!
dtype (str or list): data type as string, or a list of data types.
domain (tuple or list): (min, max) tuple, or a list of such tuples
"""
super(FakeData, self).__init__()
self.shapes = shapes
self._size = int(size)
self.random = random
self.dtype = [dtype] * len(shapes) if isinstance(dtype, six.string_types) else dtype
self.domain = [domain] * len(shapes) if isinstance(domain, tuple) else domain
assert len(self.dtype) == len(self.shapes)
        assert len(self.domain) == len(self.shapes)
def __len__(self):
return self._size
def __iter__(self):
if self.random:
for _ in range(self._size):
val = []
for k in range(len(self.shapes)):
v = self.rng.rand(*self.shapes[k]) * (self.domain[k][1] - self.domain[k][0]) + self.domain[k][0]
val.append(v.astype(self.dtype[k]))
yield val
else:
val = []
for k in range(len(self.shapes)):
v = self.rng.rand(*self.shapes[k]) * (self.domain[k][1] - self.domain[k][0]) + self.domain[k][0]
val.append(v.astype(self.dtype[k]))
for _ in range(self._size):
yield copy.copy(val)
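# Usage sketch: generate 100 fake datapoints, each a 224x224x3 float32 "image"
# in [0, 1) plus a length-1 int32 "label" in [0, 10) (shapes/dtypes chosen only
# for illustration):
#
#   ds = FakeData([[224, 224, 3], [1]], size=100,
#                 dtype=['float32', 'int32'], domain=[(0, 1), (0, 10)])
#   ds.reset_state()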
class DataFromQueue(DataFlow):
""" Produce data from a queue """
def __init__(self, queue):
"""
Args:
queue (queue): a queue with ``get()`` method.
"""
self.queue = queue
def __iter__(self):
while True:
yield self.queue.get()
class DataFromList(RNGDataFlow):
""" Wrap a list of datapoints to a DataFlow"""
def __init__(self, lst, shuffle=True):
"""
Args:
lst (list): input list. Each element is a datapoint.
shuffle (bool): shuffle data.
"""
super(DataFromList, self).__init__()
self.lst = lst
self.shuffle = shuffle
def __len__(self):
return len(self.lst)
def __iter__(self):
if not self.shuffle:
yield from self.lst
else:
idxs = np.arange(len(self.lst))
self.rng.shuffle(idxs)
for k in idxs:
yield self.lst[k]
class DataFromGenerator(DataFlow):
"""
Wrap a generator to a DataFlow.
    The dataflow has a length only if the wrapped object itself supports ``len()``.
"""
def __init__(self, gen):
"""
Args:
gen: iterable, or a callable that returns an iterable
"""
self._gen = gen
def __iter__(self):
if not callable(self._gen):
yield from self._gen
else:
yield from self._gen()
def __len__(self):
return len(self._gen)
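# Usage sketch: wrap a generator into a DataFlow; passing the *callable* rather
# than a generator object lets every call to __iter__ restart the generator:
#
#   def gen():
#       for k in range(10):
#           yield [k]
#
#   ds = DataFromGenerator(gen)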
class DataFromIterable(DataFlow):
""" Wrap an iterable of datapoints to a DataFlow"""
def __init__(self, iterable):
"""
Args:
iterable: an iterable object
"""
self._itr = iterable
try:
self._len = len(iterable)
except Exception:
self._len = None
def __len__(self):
if self._len is None:
raise NotImplementedError
return self._len
def __iter__(self):
yield from self._itr
| 3,987 | 27.898551 | 116 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .base import *
from .common import *
from .format import *
from .image import *
from .parallel_map import *
from .parallel import *
from .raw import *
from .remote import *
from .serialize import *
from . import imgaug
from . import dataset
from pkgutil import iter_modules
import os
import os.path
from ..utils.develop import LazyLoader
__all__ = []
def _global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else dir(p)
if lst:
del globals()[name]
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
__SKIP = set(['dataset', 'imgaug'])
_CURR_DIR = os.path.dirname(__file__)
for _, module_name, __ in iter_modules(
[os.path.dirname(__file__)]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if "_test" not in module_name and \
not module_name.startswith('_') and \
module_name not in __SKIP:
_global_import(module_name)
globals()['dataset'] = LazyLoader('dataset', globals(), __name__ + '.dataset')
globals()['imgaug'] = LazyLoader('imgaug', globals(), __name__ + '.imgaug')
del LazyLoader
__all__.extend(['imgaug', 'dataset'])
| 1,597 | 26.084746 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/remote.py | # -*- coding: utf-8 -*-
# File: remote.py
import multiprocessing as mp
import time
from collections import deque
import tqdm
from ..utils import logger
from ..utils.concurrency import DIE
from ..utils.serialize import dumps, loads
from ..utils.utils import get_tqdm_kwargs
from .base import DataFlow, DataFlowReentrantGuard
try:
import zmq
except ImportError:
logger.warn("Error in 'import zmq'. remote feature won't be available")
__all__ = []
else:
__all__ = ['send_dataflow_zmq', 'RemoteDataZMQ']
def send_dataflow_zmq(df, addr, hwm=50, format=None, bind=False):
"""
Run DataFlow and send data to a ZMQ socket addr.
It will serialize and send each datapoint to this address with a PUSH socket.
This function never returns.
Args:
df (DataFlow): Will infinitely loop over the DataFlow.
addr: a ZMQ socket endpoint.
hwm (int): ZMQ high-water mark (buffer size)
format (str): The serialization format.
Default format uses :mod:`utils.serialize`.
This format works with :class:`dataflow.RemoteDataZMQ`.
An alternate format is 'zmq_ops', used by https://github.com/tensorpack/zmq_ops
and :class:`input_source.ZMQInput`.
bind (bool): whether to bind or connect to the endpoint address.
"""
assert format in [None, 'zmq_op', 'zmq_ops']
if format is None:
dump_fn = dumps
else:
from zmq_ops import dump_arrays
dump_fn = dump_arrays
ctx = zmq.Context()
socket = ctx.socket(zmq.PUSH)
socket.set_hwm(hwm)
if bind:
socket.bind(addr)
else:
socket.connect(addr)
try:
df.reset_state()
logger.info("Serving data to {} with {} format ...".format(
addr, 'default' if format is None else 'zmq_ops'))
INTERVAL = 200
q = deque(maxlen=INTERVAL)
try:
total = len(df)
except NotImplementedError:
total = 0
tqdm_args = get_tqdm_kwargs(leave=True, smoothing=0.8)
tqdm_args['bar_format'] = tqdm_args['bar_format'] + "{postfix}"
while True:
with tqdm.trange(total, **tqdm_args) as pbar:
for dp in df:
start = time.time()
socket.send(dump_fn(dp), copy=False)
q.append(time.time() - start)
pbar.update(1)
if pbar.n % INTERVAL == 0:
avg = "{:.3f}".format(sum(q) / len(q))
pbar.set_postfix({'AvgSendLat': avg})
finally:
logger.info("Exiting send_dataflow_zmq ...")
socket.setsockopt(zmq.LINGER, 0)
socket.close()
if not ctx.closed:
ctx.destroy(0)
class RemoteDataZMQ(DataFlow):
"""
Produce data from ZMQ PULL socket(s).
It is the receiver-side counterpart of :func:`send_dataflow_zmq`, which uses :mod:`tensorpack.utils.serialize`
for serialization.
See http://tensorpack.readthedocs.io/tutorial/efficient-dataflow.html#distributed-dataflow
Attributes:
cnt1, cnt2 (int): number of data points received from addr1 and addr2
"""
def __init__(self, addr1, addr2=None, hwm=50, bind=True):
"""
Args:
addr1,addr2 (str): addr of the zmq endpoint to connect to.
Use both if you need two protocols (e.g. both IPC and TCP).
I don't think you'll ever need 3.
hwm (int): ZMQ high-water mark (buffer size)
bind (bool): whether to connect or bind the endpoint
"""
assert addr1
self._addr1 = addr1
self._addr2 = addr2
self._hwm = int(hwm)
self._guard = DataFlowReentrantGuard()
self._bind = bind
def reset_state(self):
self.cnt1 = 0
self.cnt2 = 0
def bind_or_connect(self, socket, addr):
if self._bind:
socket.bind(addr)
else:
socket.connect(addr)
def __iter__(self):
with self._guard:
try:
ctx = zmq.Context()
if self._addr2 is None:
socket = ctx.socket(zmq.PULL)
socket.set_hwm(self._hwm)
self.bind_or_connect(socket, self._addr1)
while True:
dp = loads(socket.recv(copy=False))
yield dp
self.cnt1 += 1
else:
socket1 = ctx.socket(zmq.PULL)
socket1.set_hwm(self._hwm)
self.bind_or_connect(socket1, self._addr1)
socket2 = ctx.socket(zmq.PULL)
socket2.set_hwm(self._hwm)
self.bind_or_connect(socket2, self._addr2)
poller = zmq.Poller()
poller.register(socket1, zmq.POLLIN)
poller.register(socket2, zmq.POLLIN)
while True:
evts = poller.poll()
for sock, evt in evts:
dp = loads(sock.recv(copy=False))
yield dp
if sock == socket1:
self.cnt1 += 1
else:
self.cnt2 += 1
finally:
ctx.destroy(linger=0)
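# Usage sketch (addresses are placeholders): assuming the consumer machine binds
# a PULL socket and the data-producing machine connects to it, the producer runs
#
#   send_dataflow_zmq(df, 'tcp://training-host:8877')   # blocks forever
#
# while the consumer side receives the serialized datapoints with
#
#   ds = RemoteDataZMQ('tcp://0.0.0.0:8877', bind=True)
#   ds.reset_state()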
# for internal use only
def dump_dataflow_to_process_queue(df, size, nr_consumer):
"""
Convert a DataFlow to a :class:`multiprocessing.Queue`.
The DataFlow will only be reset in the spawned process.
Args:
df (DataFlow): the DataFlow to dump.
size (int): size of the queue
nr_consumer (int): number of consumer of the queue.
The producer will add this many of ``DIE`` sentinel to the end of the queue.
Returns:
tuple(queue, process):
The process will take data from ``df`` and fill
the queue, once you start it. Each element in the queue is (idx,
dp). idx can be the ``DIE`` sentinel when ``df`` is exhausted.
"""
q = mp.Queue(size)
class EnqueProc(mp.Process):
def __init__(self, df, q, nr_consumer):
super(EnqueProc, self).__init__()
self.df = df
self.q = q
def run(self):
self.df.reset_state()
try:
for idx, dp in enumerate(self.df):
self.q.put((idx, dp))
finally:
for _ in range(nr_consumer):
self.q.put((DIE, None))
proc = EnqueProc(df, q, nr_consumer)
return q, proc
if __name__ == '__main__':
from argparse import ArgumentParser
from .raw import FakeData
from .common import TestDataSpeed
"""
Test the multi-producer single-consumer model
"""
parser = ArgumentParser()
parser.add_argument('-t', '--task', choices=['send', 'recv'], required=True)
parser.add_argument('-a', '--addr1', required=True)
parser.add_argument('-b', '--addr2', default=None)
args = parser.parse_args()
# tcp addr like "tcp://127.0.0.1:8877"
# ipc addr like "ipc://@ipc-test"
if args.task == 'send':
# use random=True to make it slow and cpu-consuming
ds = FakeData([(128, 244, 244, 3)], 1000, random=True)
send_dataflow_zmq(ds, args.addr1)
else:
ds = RemoteDataZMQ(args.addr1, args.addr2)
logger.info("Each DP is 73.5MB")
TestDataSpeed(ds).start_test()
| 7,562 | 32.464602 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/serialize.py | # -*- coding: utf-8 -*-
# File: serialize.py
import numpy as np
import os
import platform
from collections import defaultdict
from ..utils import logger
from ..utils.serialize import dumps, loads
from ..utils.develop import create_dummy_class # noqa
from ..utils.utils import get_tqdm
from .base import DataFlow
from .common import FixedSizeData, MapData
from .format import HDF5Data, LMDBData
from .raw import DataFromGenerator, DataFromList
__all__ = ['LMDBSerializer', 'NumpySerializer', 'TFRecordSerializer', 'HDF5Serializer']
def _reset_df_and_get_size(df):
df.reset_state()
try:
sz = len(df)
except NotImplementedError:
sz = 0
return sz
class LMDBSerializer():
"""
Serialize a Dataflow to a lmdb database, where the keys are indices and values
are serialized datapoints.
You will need to ``pip install lmdb`` to use it.
Example:
.. code-block:: python
LMDBSerializer.save(my_df, "output.lmdb")
new_df = LMDBSerializer.load("output.lmdb", shuffle=True)
"""
@staticmethod
def save(df, path, write_frequency=5000):
"""
Args:
df (DataFlow): the DataFlow to serialize.
path (str): output path. Either a directory or an lmdb file.
write_frequency (int): the frequency to write back data to disk.
A smaller value reduces memory usage.
"""
assert isinstance(df, DataFlow), type(df)
isdir = os.path.isdir(path)
if isdir:
assert not os.path.isfile(os.path.join(path, 'data.mdb')), "LMDB file exists!"
else:
assert not os.path.isfile(path), "LMDB file {} exists!".format(path)
# It's OK to use super large map_size on Linux, but not on other platforms
# See: https://github.com/NVIDIA/DIGITS/issues/206
map_size = 1099511627776 * 2 if platform.system() == 'Linux' else 128 * 10**6
db = lmdb.open(path, subdir=isdir,
map_size=map_size, readonly=False,
meminit=False, map_async=True) # need sync() at the end
size = _reset_df_and_get_size(df)
# put data into lmdb, and doubling the size if full.
# Ref: https://github.com/NVIDIA/DIGITS/pull/209/files
def put_or_grow(txn, key, value):
try:
txn.put(key, value)
return txn
except lmdb.MapFullError:
pass
txn.abort()
curr_size = db.info()['map_size']
new_size = curr_size * 2
logger.info("Doubling LMDB map_size to {:.2f}GB".format(new_size / 10**9))
db.set_mapsize(new_size)
txn = db.begin(write=True)
txn = put_or_grow(txn, key, value)
return txn
with get_tqdm(total=size) as pbar:
idx = -1
# LMDB transaction is not exception-safe!
# although it has a context manager interface
txn = db.begin(write=True)
for idx, dp in enumerate(df):
txn = put_or_grow(txn, u'{:08}'.format(idx).encode('ascii'), dumps(dp))
pbar.update()
if (idx + 1) % write_frequency == 0:
txn.commit()
txn = db.begin(write=True)
txn.commit()
keys = [u'{:08}'.format(k).encode('ascii') for k in range(idx + 1)]
with db.begin(write=True) as txn:
txn = put_or_grow(txn, b'__keys__', dumps(keys))
logger.info("Flushing database ...")
db.sync()
db.close()
@staticmethod
def load(path, shuffle=True):
"""
Note:
If you found deserialization being the bottleneck, you can use :class:`LMDBData` as the reader
and run deserialization as a mapper in parallel.
"""
df = LMDBData(path, shuffle=shuffle)
return MapData(df, LMDBSerializer._deserialize_lmdb)
@staticmethod
def _deserialize_lmdb(dp):
return loads(dp[1])
class NumpySerializer():
"""
Serialize the entire dataflow to a npz dict.
Note that this would have to store the entire dataflow in memory,
and is also >10x slower than LMDB/TFRecord serializers.
"""
@staticmethod
def save(df, path):
"""
Args:
df (DataFlow): the DataFlow to serialize.
path (str): output npz file.
"""
buffer = []
size = _reset_df_and_get_size(df)
with get_tqdm(total=size) as pbar:
for dp in df:
buffer.append(dp)
pbar.update()
        np.savez_compressed(path, buffer=np.asarray(buffer, dtype=object))
@staticmethod
def load(path, shuffle=True):
# allow_pickle defaults to False since numpy 1.16.3
# (https://www.numpy.org/devdocs/release.html#unpickling-while-loading-requires-explicit-opt-in)
buffer = np.load(path, allow_pickle=True)['buffer']
return DataFromList(buffer, shuffle=shuffle)
class TFRecordSerializer():
"""
Serialize datapoints to bytes (by tensorpack's default serializer) and write to a TFRecord file.
Note that TFRecord does not support random access and is in fact not very performant.
It's better to use :class:`LMDBSerializer`.
"""
@staticmethod
def save(df, path):
"""
Args:
df (DataFlow): the DataFlow to serialize.
path (str): output tfrecord file.
"""
size = _reset_df_and_get_size(df)
with tf.python_io.TFRecordWriter(path) as writer, get_tqdm(total=size) as pbar:
for dp in df:
writer.write(dumps(dp))
pbar.update()
@staticmethod
def load(path, size=None):
"""
Args:
size (int): total number of records. If not provided, the returned dataflow will have no `__len__()`.
It's needed because this metadata is not stored in the TFRecord file.
"""
gen = tf.python_io.tf_record_iterator(path)
ds = DataFromGenerator(gen)
ds = MapData(ds, loads)
if size is not None:
ds = FixedSizeData(ds, size)
return ds
class HDF5Serializer():
"""
Write datapoints to a HDF5 file.
Note that HDF5 files are in fact not very performant and currently do not support lazy loading.
It's better to use :class:`LMDBSerializer`.
"""
@staticmethod
def save(df, path, data_paths):
"""
Args:
df (DataFlow): the DataFlow to serialize.
path (str): output hdf5 file.
data_paths (list[str]): list of h5 paths. It should have the same
length as each datapoint, and each path should correspond to one
component of the datapoint.
"""
size = _reset_df_and_get_size(df)
buffer = defaultdict(list)
with get_tqdm(total=size) as pbar:
for dp in df:
assert len(dp) == len(data_paths), "Datapoint has {} components!".format(len(dp))
for k, el in zip(data_paths, dp):
buffer[k].append(el)
pbar.update()
with h5py.File(path, 'w') as hf, get_tqdm(total=len(data_paths)) as pbar:
for data_path in data_paths:
hf.create_dataset(data_path, data=buffer[data_path])
pbar.update()
@staticmethod
def load(path, data_paths, shuffle=True):
"""
Args:
data_paths (list): list of h5 paths to be zipped.
"""
return HDF5Data(path, data_paths, shuffle)
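# Usage sketch: assuming `df` yields [label, image] datapoints, each component
# needs its own HDF5 path when saving, and the same paths when loading:
#
#   HDF5Serializer.save(df, 'out.h5', ['label', 'image'])
#   ds = HDF5Serializer.load('out.h5', ['label', 'image'], shuffle=False)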
try:
import lmdb
except ImportError:
LMDBSerializer = create_dummy_class('LMDBSerializer', 'lmdb') # noqa
try:
import tensorflow as tf
except ImportError:
TFRecordSerializer = create_dummy_class('TFRecordSerializer', 'tensorflow') # noqa
try:
import h5py
except ImportError:
HDF5Serializer = create_dummy_class('HDF5Serializer', 'h5py') # noqa
if __name__ == '__main__':
from .raw import FakeData
import time
ds = FakeData([[300, 300, 3], [1]], 1000)
print(time.time())
TFRecordSerializer.save(ds, 'out.tfrecords')
print(time.time())
df = TFRecordSerializer.load('out.tfrecords', size=1000)
df.reset_state()
for idx, dp in enumerate(df):
pass
print("TF Finished, ", idx)
print(time.time())
LMDBSerializer.save(ds, 'out.lmdb')
print(time.time())
df = LMDBSerializer.load('out.lmdb')
df.reset_state()
for idx, dp in enumerate(df):
pass
print("LMDB Finished, ", idx)
print(time.time())
NumpySerializer.save(ds, 'out.npz')
print(time.time())
df = NumpySerializer.load('out.npz')
df.reset_state()
for idx, dp in enumerate(df):
pass
print("Numpy Finished, ", idx)
print(time.time())
paths = ['p1', 'p2']
HDF5Serializer.save(ds, 'out.h5', paths)
print(time.time())
df = HDF5Serializer.load('out.h5', paths)
df.reset_state()
for idx, dp in enumerate(df):
pass
print("HDF5 Finished, ", idx)
print(time.time())
| 9,180 | 31.101399 | 113 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/parallel.py | # -*- coding: utf-8 -*-
# File: parallel.py
import atexit
import pickle
import errno
import traceback
import itertools
import multiprocessing as mp
import os
import sys
import uuid
import weakref
from contextlib import contextmanager
import zmq
from six.moves import queue, range
from ..utils import logger
from ..utils.concurrency import (
StoppableThread, enable_death_signal, ensure_proc_terminate, start_proc_mask_signal)
from ..utils.serialize import dumps_once as dumps, loads_once as loads
from .base import DataFlow, DataFlowReentrantGuard, DataFlowTerminated, ProxyDataFlow
__all__ = ['PrefetchData', 'MultiProcessPrefetchData',
'MultiProcessRunner', 'MultiProcessRunnerZMQ', 'MultiThreadRunner',
'PrefetchDataZMQ', 'MultiThreadPrefetchData']
# from https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/__init__.py
class _ExceptionWrapper:
MAGIC = b"EXC_MAGIC"
"""Wraps an exception plus traceback to communicate across threads"""
def __init__(self, exc_info):
# It is important that we don't store exc_info, see
# NOTE [ Python Traceback Reference Cycle Problem ]
self.exc_type = exc_info[0]
self.exc_msg = "".join(traceback.format_exception(*exc_info))
def pack(self):
return self.MAGIC + pickle.dumps(self)
@staticmethod
def unpack(dp):
if isinstance(dp, bytes) and dp.startswith(_ExceptionWrapper.MAGIC):
return pickle.loads(dp[len(_ExceptionWrapper.MAGIC):])
def _repeat_iter(get_itr):
while True:
yield from get_itr()
def _bind_guard(sock, name):
try:
sock.bind(name)
except zmq.ZMQError:
logger.error(
"ZMQError in socket.bind('{}'). Perhaps you're \
using pipes on a non-local file system. See documentation of MultiProcessRunnerZMQ \
for more information.".format(name))
raise
def _get_pipe_name(name):
if sys.platform.startswith('linux'):
# linux supports abstract sockets: http://api.zeromq.org/4-1:zmq-ipc
pipename = "ipc://@{}-pipe-{}".format(name, str(uuid.uuid1())[:8])
pipedir = os.environ.get('TENSORPACK_PIPEDIR', None)
if pipedir is not None:
logger.warn("TENSORPACK_PIPEDIR is not used on Linux any more! Abstract sockets will be used.")
else:
pipedir = os.environ.get('TENSORPACK_PIPEDIR', None)
if pipedir is not None:
logger.info("ZMQ uses TENSORPACK_PIPEDIR={}".format(pipedir))
else:
pipedir = '.'
assert os.path.isdir(pipedir), pipedir
filename = '{}/{}-pipe-{}'.format(pipedir.rstrip('/'), name, str(uuid.uuid1())[:6])
assert not os.path.exists(filename), "Pipe {} exists! You may be unlucky.".format(filename)
pipename = "ipc://{}".format(filename)
return pipename
def del_weakref(x):
o = x()
if o is not None:
o.__del__()
@contextmanager
def _zmq_catch_error(name):
try:
yield
except zmq.ContextTerminated:
logger.info("[{}] Context terminated.".format(name))
raise DataFlowTerminated()
except zmq.ZMQError as e:
if e.errno == errno.ENOTSOCK: # socket closed
logger.info("[{}] Socket closed.".format(name))
raise DataFlowTerminated()
else:
raise
except Exception:
raise
class _MultiProcessZMQDataFlow(DataFlow):
def __init__(self):
assert os.name != 'nt', "ZMQ IPC doesn't support windows!"
self._reset_done = False
self._procs = []
def reset_state(self):
"""
All forked dataflows should only be reset **once and only once** in spawned processes.
Subclasses should call this method with super.
"""
assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
self._reset_done = True
# __del__ not guaranteed to get called at exit
atexit.register(del_weakref, weakref.ref(self))
def _start_processes(self):
start_proc_mask_signal(self._procs)
def __del__(self):
try:
if not self._reset_done:
return
if not self.context.closed:
self.socket.close(0)
self.context.destroy(0)
for x in self._procs:
x.terminate()
x.join(5)
print("{} successfully cleaned-up.".format(type(self).__name__))
except Exception:
pass
class MultiProcessRunner(ProxyDataFlow):
"""
Running a DataFlow in >=1 processes using Python multiprocessing utilities.
It will fork the process that calls :meth:`__init__`, collect datapoints from `ds` in each
process by a Python :class:`multiprocessing.Queue`.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that the process will be forked ``num_proc`` times.
There will be ``num_proc`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_proc=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_proc>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_proc`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
2. This has more serialization overhead than :class:`MultiProcessRunnerZMQ` when data is large.
3. You can nest like this: ``MultiProcessRunnerZMQ(MultiProcessRunner(df, num_proc=a), num_proc=b)``.
A total of ``a`` instances of ``df`` worker processes will be created.
4. Fork happens in `__init__`. `reset_state()` is a no-op.
DataFlow in the worker processes will be reset at the time of fork.
        5. This DataFlow does support Windows. However, Windows requires more strict picklability on processes,
which means that some code that's forkable on Linux may not be forkable on Windows. If that happens you'll
need to re-organize some part of code that's not forkable.
"""
class _Worker(mp.Process):
def __init__(self, ds, queue, idx):
super(MultiProcessRunner._Worker, self).__init__()
self.ds = ds
self.queue = queue
self.idx = idx
def run(self):
enable_death_signal(_warn=self.idx == 0)
# reset all ds so each process will produce different data
self.ds.reset_state()
while True:
for dp in self.ds:
self.queue.put(dp)
def __init__(self, ds, num_prefetch, num_proc):
"""
Args:
ds (DataFlow): input DataFlow.
num_prefetch (int): size of the queue to hold prefetched datapoints.
Required.
num_proc (int): number of processes to use. Required.
"""
# https://docs.python.org/3.6/library/multiprocessing.html?highlight=process#the-spawn-and-forkserver-start-methods
if os.name == 'nt':
logger.warn("MultiProcessRunner does support Windows. \
However, Windows requires more strict picklability on processes, which may \
lead of failure on some of the code.")
super(MultiProcessRunner, self).__init__(ds)
try:
self._size = len(ds)
except NotImplementedError:
self._size = -1
assert num_proc > 0, num_proc
assert num_prefetch > 0, num_prefetch
self.num_proc = num_proc
self.num_prefetch = num_prefetch
if num_proc > 1:
logger.info("[MultiProcessRunner] Will fork a dataflow more than one times. "
"This assumes the datapoints are i.i.d.")
self.queue = mp.Queue(self.num_prefetch)
self.procs = [MultiProcessRunner._Worker(self.ds, self.queue, idx)
for idx in range(self.num_proc)]
ensure_proc_terminate(self.procs)
self._reset_done = False
def __iter__(self):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
dp = self.queue.get()
yield dp
def reset_state(self):
assert not self._reset_done, "reset_state() was called twice! This violates the API of DataFlow!"
self._reset_done = True
start_proc_mask_signal(self.procs)
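# Usage sketch: assuming `ds0` is a CPU-heavy DataFlow whose datapoints are
# i.i.d. (see the notes above about duplication), fork it into 4 worker
# processes with a prefetch queue of 256 datapoints:
#
#   ds = MultiProcessRunner(ds0, num_prefetch=256, num_proc=4)
#   ds.reset_state()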
class MultiProcessRunnerZMQ(_MultiProcessZMQDataFlow):
"""
Run a DataFlow in >=1 processes, with ZeroMQ for communication.
It will fork the calling process of :meth:`reset_state()`,
and collect datapoints from the given dataflow in each process by ZeroMQ IPC pipe.
This is typically faster than :class:`MultiProcessRunner`.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that the process will be forked ``num_proc`` times.
There will be ``num_proc`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_proc=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_proc>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_proc`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
2. `reset_state()` of the given dataflow will be called **once and only once** in the worker processes.
3. The fork of processes happened in this dataflow's `reset_state()` method.
Please note that forking a TensorFlow GPU session may be unsafe.
If you're managing this dataflow on your own,
it's better to fork before creating the session.
4. (Fork-safety) After the fork has happened, this dataflow becomes not fork-safe.
i.e., if you fork an already reset instance of this dataflow,
it won't be usable in the forked process. Therefore, do not nest two `MultiProcessRunnerZMQ`.
5. (Thread-safety) ZMQ is not thread safe. Therefore, do not call :meth:`get_data` of the same dataflow in
           more than one thread.
        6. This dataflow does not support Windows. Use `MultiProcessRunner`, which works on Windows.
7. (For Mac only) A UNIX named pipe will be created in the current directory.
           However, certain non-local filesystems such as NFS/GlusterFS/AFS don't always support pipes.
You can change the directory by ``export TENSORPACK_PIPEDIR=/other/dir``.
In particular, you can use somewhere under '/tmp' which is usually local.
Note that some non-local FS may appear to support pipes and code
may appear to run but crash with bizarre error.
Also note that ZMQ limits the maximum length of pipe path.
If you hit the limit, you can set the directory to a softlink
which points to a local directory.
"""
class _Worker(mp.Process):
def __init__(self, ds, conn_name, hwm, idx):
super(MultiProcessRunnerZMQ._Worker, self).__init__()
self.ds = ds
self.conn_name = conn_name
self.hwm = hwm
self.idx = idx
def run(self):
enable_death_signal(_warn=self.idx == 0)
self.ds.reset_state()
itr = _repeat_iter(lambda: self.ds)
context = zmq.Context()
socket = context.socket(zmq.PUSH)
socket.set_hwm(self.hwm)
socket.connect(self.conn_name)
try:
while True:
try:
dp = next(itr)
socket.send(dumps(dp), copy=False)
except Exception:
dp = _ExceptionWrapper(sys.exc_info()).pack()
socket.send(dumps(dp), copy=False)
raise
# sigint could still propagate here, e.g. when nested
except KeyboardInterrupt:
pass
finally:
socket.close(0)
context.destroy(0)
def __init__(self, ds, num_proc=1, hwm=50):
"""
Args:
ds (DataFlow): input DataFlow.
num_proc (int): number of processes to use.
hwm (int): the zmq "high-water mark" (queue size) for both sender and receiver.
"""
super(MultiProcessRunnerZMQ, self).__init__()
self.ds = ds
self.num_proc = num_proc
self._hwm = hwm
if num_proc > 1:
logger.info("[MultiProcessRunnerZMQ] Will fork a dataflow more than one times. "
"This assumes the datapoints are i.i.d.")
try:
self._size = ds.__len__()
except NotImplementedError:
self._size = -1
def _recv(self):
ret = loads(self.socket.recv(copy=False))
exc = _ExceptionWrapper.unpack(ret)
if exc is not None:
logger.error("Exception '{}' in worker:".format(str(exc.exc_type)))
raise exc.exc_type(exc.exc_msg)
return ret
def __len__(self):
return self.ds.__len__()
def __iter__(self):
with self._guard, _zmq_catch_error('MultiProcessRunnerZMQ'):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
yield self._recv()
def reset_state(self):
super(MultiProcessRunnerZMQ, self).reset_state()
self._guard = DataFlowReentrantGuard()
self.context = zmq.Context()
self.socket = self.context.socket(zmq.PULL)
self.socket.set_hwm(self._hwm)
pipename = _get_pipe_name('dataflow')
_bind_guard(self.socket, pipename)
self._procs = [MultiProcessRunnerZMQ._Worker(self.ds, pipename, self._hwm, idx)
for idx in range(self.num_proc)]
self._start_processes()
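# Usage sketch: same data-integrity caveats as MultiProcessRunner, but transport
# goes over ZMQ IPC pipes (not available on Windows); the fork happens inside
# reset_state(), which is best called before creating any TensorFlow GPU session:
#
#   ds = MultiProcessRunnerZMQ(ds0, num_proc=4, hwm=100)
#   ds.reset_state()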
class MultiThreadRunner(DataFlow):
"""
Create multiple dataflow instances and run them each in one thread.
Collect outputs from them with a queue.
Note:
1. (Data integrity) An iterator cannot run faster automatically -- what's happening is
that each thread will create a dataflow iterator.
There will be ``num_thread`` dataflow running in parallel and **independently**.
As a result, we have the following guarantee on the dataflow correctness:
a. When ``num_thread=1``, this dataflow produces the same data as the
given dataflow in the same order.
b. When ``num_thread>1``, if each sample from the given dataflow is i.i.d.,
then this dataflow produces the **same distribution** of data as the given dataflow.
This implies that there will be duplication, reordering, etc.
You probably only want to use it for training.
For example, if your original dataflow contains no randomness and produces the same first datapoint,
then after parallel prefetching, the datapoint will be produced ``num_thread`` times
at the beginning.
Even when your original dataflow is fully shuffled, you still need to be aware of the
`Birthday Paradox <https://en.wikipedia.org/wiki/Birthday_problem>`_
and know that you'll likely see duplicates.
To utilize parallelism with more strict data integrity, you can use
the parallel versions of :class:`MapData`: :class:`MultiThreadMapData`, :class:`MultiProcessMapData`.
"""
class _Worker(StoppableThread):
def __init__(self, get_df, queue):
super(MultiThreadRunner._Worker, self).__init__()
self.df = get_df()
assert isinstance(self.df, DataFlow), self.df
self.queue = queue
self.daemon = True
def run(self):
self.df.reset_state()
try:
while True:
for dp in self.df:
if self.stopped():
return
self.queue_put_stoppable(self.queue, dp)
except Exception:
if self.stopped():
pass # skip duplicated error messages
else:
raise
finally:
self.stop()
def __init__(self, get_df, num_prefetch, num_thread):
"""
Args:
get_df ( -> DataFlow): a callable which returns a DataFlow.
Each thread will call this function to get the DataFlow to use.
Therefore do not return the same DataFlow object for each call,
unless your dataflow is stateless.
num_prefetch (int): size of the queue
num_thread (int): number of threads
"""
assert num_thread > 0, num_thread
assert num_prefetch > 0, num_prefetch
self.num_thread = num_thread
self.queue = queue.Queue(maxsize=num_prefetch)
self.threads = [
MultiThreadRunner._Worker(get_df, self.queue)
for _ in range(num_thread)]
try:
self._size = self.__len__()
except NotImplementedError:
self._size = -1
def reset_state(self):
for th in self.threads:
th.df.reset_state()
th.start()
def __len__(self):
return self.threads[0].df.__len__()
def __iter__(self):
for k in itertools.count():
if self._size > 0 and k >= self._size:
break
yield self.queue.get()
def __del__(self):
for p in self.threads:
if p.is_alive():
p.stop()
p.join()
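# Usage sketch: because every thread calls `get_df()` to build its own DataFlow,
# pass a factory function rather than an already-built instance
# (`MyDataFlow` stands for a hypothetical user-defined dataflow):
#
#   def make_df():
#       return MyDataFlow()
#
#   ds = MultiThreadRunner(make_df, num_prefetch=256, num_thread=8)
#   ds.reset_state()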
class PlasmaPutData(ProxyDataFlow):
"""
Put each data point to plasma shared memory object store, and yield the object id instead.
Experimental.
"""
def __init__(self, ds, socket="/tmp/plasma"):
self._socket = socket
super(PlasmaPutData, self).__init__(ds)
def reset_state(self):
super(PlasmaPutData, self).reset_state()
self.client = plasma.connect(self._socket, "", 0)
def __iter__(self):
for dp in self.ds:
oid = self.client.put(dp)
yield [oid.binary()]
class PlasmaGetData(ProxyDataFlow):
"""
Take plasma object id from a DataFlow, and retrieve it from plasma shared
memory object store.
Experimental.
"""
def __init__(self, ds, socket="/tmp/plasma"):
self._socket = socket
super(PlasmaGetData, self).__init__(ds)
def reset_state(self):
super(PlasmaGetData, self).reset_state()
self.client = plasma.connect(self._socket, "", 0)
def __iter__(self):
for dp in self.ds:
oid = plasma.ObjectID(dp[0])
dp = self.client.get(oid)
yield dp
plasma = None
# These plasma code is only experimental
# try:
# import pyarrow.plasma as plasma
# except ImportError:
# from ..utils.develop import create_dummy_class
# PlasmaPutData = create_dummy_class('PlasmaPutData', 'pyarrow') # noqa
# PlasmaGetData = create_dummy_class('PlasmaGetData', 'pyarrow') # noqa
# The old inappropriate names:
PrefetchData = MultiProcessRunner
MultiProcessPrefetchData = MultiProcessRunner
PrefetchDataZMQ = MultiProcessRunnerZMQ
MultiThreadPrefetchData = MultiThreadRunner
if __name__ == '__main__':
import time
from .raw import DataFromGenerator
from .common import FixedSizeData
x = DataFromGenerator(itertools.count())
x = FixedSizeData(x, 100)
x = MultiProcessRunnerZMQ(x, 2)
x.reset_state()
for idx, dp in enumerate(x):
print(dp)
time.sleep(0.1)
| 21,575 | 38.588991 | 123 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/dataset/bsds500.py | # -*- coding: utf-8 -*-
# File: bsds500.py
import glob
import numpy as np
import os
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ['BSDS500']
DATA_URL = "http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/BSR/BSR_bsds500.tgz"
DATA_SIZE = 70763455
IMG_W, IMG_H = 481, 321
class BSDS500(RNGDataFlow):
"""
`Berkeley Segmentation Data Set and Benchmarks 500 dataset
<http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/resources.html#bsds500>`_.
    Produces an ``(image, label)`` pair, where ``image`` has shape (321, 481, 3 (BGR)) and
ranges in [0,255].
``Label`` is a floating point image of shape (321, 481) in range [0, 1].
The value of each pixel is ``number of times it is annotated as edge / total number of annotators for this image``.
"""
def __init__(self, name, data_dir=None, shuffle=True):
"""
Args:
name (str): 'train', 'test', 'val'
data_dir (str): a directory containing the original 'BSR' directory.
"""
# check and download data
if data_dir is None:
data_dir = get_dataset_path('bsds500_data')
if not os.path.isdir(os.path.join(data_dir, 'BSR')):
download(DATA_URL, data_dir, expect_size=DATA_SIZE)
filename = DATA_URL.split('/')[-1]
filepath = os.path.join(data_dir, filename)
import tarfile
tarfile.open(filepath, 'r:gz').extractall(data_dir)
self.data_root = os.path.join(data_dir, 'BSR', 'BSDS500', 'data')
assert os.path.isdir(self.data_root)
self.shuffle = shuffle
assert name in ['train', 'test', 'val']
self._load(name)
def _load(self, name):
image_glob = os.path.join(self.data_root, 'images', name, '*.jpg')
image_files = glob.glob(image_glob)
gt_dir = os.path.join(self.data_root, 'groundTruth', name)
self.data = np.zeros((len(image_files), IMG_H, IMG_W, 3), dtype='uint8')
self.label = np.zeros((len(image_files), IMG_H, IMG_W), dtype='float32')
for idx, f in enumerate(image_files):
im = cv2.imread(f, cv2.IMREAD_COLOR)
assert im is not None
if im.shape[0] > im.shape[1]:
im = np.transpose(im, (1, 0, 2))
assert im.shape[:2] == (IMG_H, IMG_W), "{} != {}".format(im.shape[:2], (IMG_H, IMG_W))
imgid = os.path.basename(f).split('.')[0]
gt_file = os.path.join(gt_dir, imgid)
gt = loadmat(gt_file)['groundTruth'][0]
n_annot = gt.shape[0]
gt = sum(gt[k]['Boundaries'][0][0] for k in range(n_annot))
gt = gt.astype('float32')
gt *= 1.0 / n_annot
if gt.shape[0] > gt.shape[1]:
gt = gt.transpose()
assert gt.shape == (IMG_H, IMG_W)
self.data[idx] = im
self.label[idx] = gt
def __len__(self):
return self.data.shape[0]
def __iter__(self):
idxs = np.arange(self.data.shape[0])
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
yield [self.data[k], self.label[k]]
try:
from scipy.io import loadmat
import cv2
except ImportError:
from ...utils.develop import create_dummy_class
BSDS500 = create_dummy_class('BSDS500', ['scipy.io', 'cv2']) # noqa
if __name__ == '__main__':
a = BSDS500('val')
a.reset_state()
for k in a:
cv2.imshow("haha", k[1].astype('uint8') * 255)
cv2.waitKey(1000)
| 3,586 | 33.161905 | 119 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/dataset/ilsvrc.py | # -*- coding: utf-8 -*-
# File: ilsvrc.py
import numpy as np
import os
import tarfile
import tqdm
from ...utils import logger
from ...utils.fs import download, get_dataset_path, mkdir_p
from ...utils.loadcaffe import get_caffe_pb
from ...utils.timer import timed_operation
from ..base import RNGDataFlow
__all__ = ['ILSVRCMeta', 'ILSVRC12', 'ILSVRC12Files']
CAFFE_ILSVRC12_URL = ("http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz", 17858008)
class ILSVRCMeta(object):
"""
Provide methods to access metadata for ILSVRC dataset.
"""
def __init__(self, dir=None):
if dir is None:
dir = get_dataset_path('ilsvrc_metadata')
self.dir = os.path.expanduser(dir)
mkdir_p(self.dir)
f = os.path.join(self.dir, 'synsets.txt')
if not os.path.isfile(f):
self._download_caffe_meta()
self.caffepb = None
def get_synset_words_1000(self):
"""
Returns:
dict: {cls_number: cls_name}
"""
fname = os.path.join(self.dir, 'synset_words.txt')
assert os.path.isfile(fname), fname
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def get_synset_1000(self):
"""
Returns:
dict: {cls_number: synset_id}
"""
fname = os.path.join(self.dir, 'synsets.txt')
assert os.path.isfile(fname)
lines = [x.strip() for x in open(fname).readlines()]
return dict(enumerate(lines))
def _download_caffe_meta(self):
fpath = download(CAFFE_ILSVRC12_URL[0], self.dir, expect_size=CAFFE_ILSVRC12_URL[1])
tarfile.open(fpath, 'r:gz').extractall(self.dir)
def get_image_list(self, name, dir_structure='original'):
"""
Args:
name (str): 'train' or 'val' or 'test'
dir_structure (str): same as in :meth:`ILSVRC12.__init__()`.
Returns:
list: list of (image filename, label)
"""
assert name in ['train', 'val', 'test']
assert dir_structure in ['original', 'train']
add_label_to_fname = (name != 'train' and dir_structure != 'original')
if add_label_to_fname:
synset = self.get_synset_1000()
fname = os.path.join(self.dir, name + '.txt')
assert os.path.isfile(fname), fname
with open(fname) as f:
ret = []
for line in f.readlines():
name, cls = line.strip().split()
cls = int(cls)
if add_label_to_fname:
name = os.path.join(synset[cls], name)
ret.append((name.strip(), cls))
assert len(ret), fname
return ret
def get_per_pixel_mean(self, size=None):
"""
Args:
size (tuple): image size in (h, w). Defaults to (256, 256).
Returns:
np.ndarray: per-pixel mean of shape (h, w, 3 (BGR)) in range [0, 255].
"""
if self.caffepb is None:
self.caffepb = get_caffe_pb()
obj = self.caffepb.BlobProto()
mean_file = os.path.join(self.dir, 'imagenet_mean.binaryproto')
with open(mean_file, 'rb') as f:
obj.ParseFromString(f.read())
arr = np.array(obj.data).reshape((3, 256, 256)).astype('float32')
arr = np.transpose(arr, [1, 2, 0])
if size is not None:
arr = cv2.resize(arr, size[::-1])
return arr
@staticmethod
def guess_dir_structure(dir):
"""
Return the directory structure of "dir".
Args:
dir(str): something like '/path/to/imagenet/val'
Returns:
either 'train' or 'original'
"""
subdir = os.listdir(dir)[0]
# find a subdir starting with 'n'
if subdir.startswith('n') and \
os.path.isdir(os.path.join(dir, subdir)):
dir_structure = 'train'
else:
dir_structure = 'original'
logger.info(
"[ILSVRC12] Assuming directory {} has '{}' structure.".format(
dir, dir_structure))
return dir_structure
class ILSVRC12Files(RNGDataFlow):
"""
Same as :class:`ILSVRC12`, but produces filenames of the images instead of nparrays.
This could be useful when ``cv2.imread`` is a bottleneck and you want to
decode it in smarter ways (e.g. in parallel).
"""
def __init__(self, dir, name, meta_dir=None,
shuffle=None, dir_structure=None):
"""
Same as in :class:`ILSVRC12`.
"""
assert name in ['train', 'test', 'val'], name
dir = os.path.expanduser(dir)
assert os.path.isdir(dir), dir
self.full_dir = os.path.join(dir, name)
self.name = name
assert os.path.isdir(self.full_dir), self.full_dir
assert meta_dir is None or os.path.isdir(meta_dir), meta_dir
if shuffle is None:
shuffle = name == 'train'
self.shuffle = shuffle
if name == 'train':
dir_structure = 'train'
if dir_structure is None:
dir_structure = ILSVRCMeta.guess_dir_structure(self.full_dir)
meta = ILSVRCMeta(meta_dir)
self.imglist = meta.get_image_list(name, dir_structure)
for fname, _ in self.imglist[:10]:
fname = os.path.join(self.full_dir, fname)
assert os.path.isfile(fname), fname
def __len__(self):
return len(self.imglist)
def __iter__(self):
idxs = np.arange(len(self.imglist))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
fname, label = self.imglist[k]
fname = os.path.join(self.full_dir, fname)
yield [fname, label]
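# Hedged usage sketch (added for illustration; not part of the original file):
# as the docstring above suggests, the filenames produced by ILSVRC12Files can be
# decoded in parallel.  This assumes the parallel map helpers live in `..parallel_map`
# (as in upstream tensorpack) and that '/path/to/ilsvrc12' points to the dataset.
#
#   from ..parallel_map import MultiThreadMapData
#
#   def decode(dp):
#       fname, label = dp
#       im = cv2.imread(fname, cv2.IMREAD_COLOR)
#       return None if im is None else [im, label]   # returning None drops the sample
#
#   df = ILSVRC12Files('/path/to/ilsvrc12', 'train')
#   df = MultiThreadMapData(df, num_thread=25, map_func=decode, buffer_size=1000)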
class ILSVRC12(ILSVRC12Files):
"""
Produces uint8 ILSVRC12 images of shape [h, w, 3(BGR)], and a label between [0, 999].
The label map follows the synsets.txt file in http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz.
"""
def __init__(self, dir, name, meta_dir=None,
shuffle=None, dir_structure=None):
"""
Args:
dir (str): A directory containing a subdir named ``name``,
containing the images in a structure described below.
name (str): One of 'train' or 'val' or 'test'.
shuffle (bool): shuffle the dataset.
Defaults to True if name=='train'.
dir_structure (str): One of 'original' or 'train'.
The directory structure for the 'val' directory.
                'original' means the original decompressed directory, which only has a list of image files (as below).
If set to 'train', it expects the same two-level directory structure similar to 'dir/train/'.
By default, it tries to automatically detect the structure.
You probably do not need to care about this option because 'original' is what people usually have.
Example:
When `dir_structure=='original'`, `dir` should have the following structure:
.. code-block:: none
dir/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
ILSVRC2012_val_00000001.JPEG
...
test/
ILSVRC2012_test_00000001.JPEG
...
With the downloaded ILSVRC12_img_*.tar, you can use the following
command to build the above structure:
.. code-block:: none
mkdir val && tar xvf ILSVRC12_img_val.tar -C val
mkdir test && tar xvf ILSVRC12_img_test.tar -C test
mkdir train && tar xvf ILSVRC12_img_train.tar -C train && cd train
find -type f -name '*.tar' | parallel -P 10 'echo {} && mkdir -p {/.} && tar xf {} -C {/.}'
When `dir_structure=='train'`, `dir` should have the following structure:
.. code-block:: none
dir/
train/
n02134418/
n02134418_198.JPEG
...
...
val/
n01440764/
ILSVRC2012_val_00000293.JPEG
...
...
test/
ILSVRC2012_test_00000001.JPEG
...
"""
super(ILSVRC12, self).__init__(
dir, name, meta_dir, shuffle, dir_structure)
"""
There are some CMYK / png images, but cv2 seems robust to them.
https://github.com/tensorflow/models/blob/c0cd713f59cfe44fa049b3120c417cc4079c17e3/research/inception/inception/data/build_imagenet_data.py#L264-L300
"""
def __iter__(self):
for fname, label in super(ILSVRC12, self).__iter__():
im = cv2.imread(fname, cv2.IMREAD_COLOR)
assert im is not None, fname
yield [im, label]
@staticmethod
def get_training_bbox(bbox_dir, imglist):
import xml.etree.ElementTree as ET
ret = []
def parse_bbox(fname):
root = ET.parse(fname).getroot()
            size = list(root.find('size'))   # getchildren() was removed in Python 3.9
            size = [int(size[0].text), int(size[1].text)]
            box = list(root.find('object').find('bndbox'))
            box = [float(x.text) for x in box]
return np.asarray(box, dtype='float32')
with timed_operation('Loading Bounding Boxes ...'):
cnt = 0
for k in tqdm.trange(len(imglist)):
fname = imglist[k][0]
fname = fname[:-4] + 'xml'
fname = os.path.join(bbox_dir, fname)
try:
ret.append(parse_bbox(fname))
cnt += 1
except Exception:
ret.append(None)
logger.info("{}/{} images have bounding box.".format(cnt, len(imglist)))
return ret
try:
import cv2
except ImportError:
from ...utils.develop import create_dummy_class
ILSVRC12 = create_dummy_class('ILSVRC12', 'cv2') # noqa
if __name__ == '__main__':
meta = ILSVRCMeta()
# print(meta.get_synset_words_1000())
ds = ILSVRC12('/home/wyx/data/fake_ilsvrc/', 'train', shuffle=False)
ds.reset_state()
for _ in ds:
from IPython import embed
embed()
break
| 10,381 | 32.81759 | 153 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/dataset/caltech101.py | # -*- coding: utf-8 -*-
# File: caltech101.py
import os
from ...utils import logger
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ["Caltech101Silhouettes"]
def maybe_download(url, work_directory):
"""Download the data from Marlin's website, unless it's already here."""
filename = url.split("/")[-1]
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
logger.info("Downloading to {}...".format(filepath))
download(url, work_directory)
return filepath
class Caltech101Silhouettes(RNGDataFlow):
"""
Produces [image, label] in Caltech101 Silhouettes dataset,
image is 28x28 in the range [0,1], label is an int in the range [0,100].
"""
_DIR_NAME = "caltech101_data"
_SOURCE_URL = "https://people.cs.umass.edu/~marlin/data/"
def __init__(self, name, shuffle=True, dir=None):
"""
Args:
name (str): 'train', 'test', 'val'
shuffle (bool): shuffle the dataset
"""
if dir is None:
dir = get_dataset_path(self._DIR_NAME)
assert name in ['train', 'test', 'val']
self.name = name
self.shuffle = shuffle
def get_images_and_labels(data_file):
f = maybe_download(self._SOURCE_URL + data_file, dir)
data = scipy.io.loadmat(f)
return data
self.data = get_images_and_labels("caltech101_silhouettes_28_split1.mat")
if self.name == "train":
self.images = self.data["train_data"].reshape((4100, 28, 28))
self.labels = self.data["train_labels"].ravel() - 1
elif self.name == "test":
self.images = self.data["test_data"].reshape((2307, 28, 28))
self.labels = self.data["test_labels"].ravel() - 1
else:
self.images = self.data["val_data"].reshape((2264, 28, 28))
self.labels = self.data["val_labels"].ravel() - 1
def __len__(self):
return self.images.shape[0]
def __iter__(self):
idxs = list(range(self.__len__()))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
img = self.images[k]
label = self.labels[k]
yield [img, label]
try:
import scipy.io
except ImportError:
from ...utils.develop import create_dummy_class
Caltech101Silhouettes = create_dummy_class('Caltech101Silhouettes', 'scipy.io') # noqa
if __name__ == "__main__":
ds = Caltech101Silhouettes("train")
ds.reset_state()
for _ in ds:
from IPython import embed
embed()
break
| 2,655 | 28.511111 | 90 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/dataset/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .bsds500 import *
from .cifar import *
from .ilsvrc import *
from .mnist import *
from .svhn import *
from .caltech101 import *
from pkgutil import iter_modules
import os
import os.path
__all__ = []
def global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else dir(p)
if lst:
del globals()[name]
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if not module_name.startswith('_'):
global_import(module_name)
| 1,073 | 25.195122 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/dataset/svhn.py | # -*- coding: utf-8 -*-
# File: svhn.py
import numpy as np
import os
from ...utils import logger
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ['SVHNDigit']
SVHN_URL = "http://ufldl.stanford.edu/housenumbers/"
class SVHNDigit(RNGDataFlow):
"""
`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Cropped Digit Dataset.
Produces [img, label], img of 32x32x3 in range [0,255], label of 0-9
"""
_Cache = {}
def __init__(self, name, data_dir=None, shuffle=True):
"""
Args:
name (str): 'train', 'test', or 'extra'.
data_dir (str): a directory containing the original {train,test,extra}_32x32.mat.
shuffle (bool): shuffle the dataset.
"""
self.shuffle = shuffle
if name in SVHNDigit._Cache:
self.X, self.Y = SVHNDigit._Cache[name]
return
if data_dir is None:
data_dir = get_dataset_path('svhn_data')
assert name in ['train', 'test', 'extra'], name
filename = os.path.join(data_dir, name + '_32x32.mat')
if not os.path.isfile(filename):
url = SVHN_URL + os.path.basename(filename)
logger.info("File {} not found!".format(filename))
logger.info("Downloading from {} ...".format(url))
download(url, os.path.dirname(filename))
logger.info("Loading {} ...".format(filename))
data = scipy.io.loadmat(filename)
self.X = data['X'].transpose(3, 0, 1, 2)
self.Y = data['y'].reshape((-1))
self.Y[self.Y == 10] = 0
SVHNDigit._Cache[name] = (self.X, self.Y)
def __len__(self):
return self.X.shape[0]
def __iter__(self):
n = self.X.shape[0]
idxs = np.arange(n)
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
# since svhn is quite small, just do it for safety
yield [self.X[k], self.Y[k]]
@staticmethod
def get_per_pixel_mean(names=('train', 'test', 'extra')):
"""
Args:
names (tuple[str]): names of the dataset split
Returns:
a 32x32x3 image, the mean of all images in the given datasets
"""
for name in names:
assert name in ['train', 'test', 'extra'], name
images = [SVHNDigit(x).X for x in names]
return np.concatenate(tuple(images)).mean(axis=0)
try:
import scipy.io
except ImportError:
from ...utils.develop import create_dummy_class
SVHNDigit = create_dummy_class('SVHNDigit', 'scipy.io') # noqa
if __name__ == '__main__':
a = SVHNDigit('train')
b = SVHNDigit.get_per_pixel_mean()
| 2,704 | 29.738636 | 93 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/dataset/cifar.py | # -*- coding: utf-8 -*-
# File: cifar.py
# Yukun Chen <[email protected]>
import numpy as np
import os
import pickle
import tarfile
from ...utils import logger
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ['CifarBase', 'Cifar10', 'Cifar100']
DATA_URL_CIFAR_10 = ('http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz', 170498071)
DATA_URL_CIFAR_100 = ('http://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz', 169001437)
def maybe_download_and_extract(dest_directory, cifar_classnum):
"""Download and extract the tarball from Alex's website. Copied from tensorflow example """
assert cifar_classnum == 10 or cifar_classnum == 100
if cifar_classnum == 10:
cifar_foldername = 'cifar-10-batches-py'
else:
cifar_foldername = 'cifar-100-python'
if os.path.isdir(os.path.join(dest_directory, cifar_foldername)):
logger.info("Found cifar{} data in {}.".format(cifar_classnum, dest_directory))
return
else:
DATA_URL = DATA_URL_CIFAR_10 if cifar_classnum == 10 else DATA_URL_CIFAR_100
filename = DATA_URL[0].split('/')[-1]
filepath = os.path.join(dest_directory, filename)
download(DATA_URL[0], dest_directory, expect_size=DATA_URL[1])
tarfile.open(filepath, 'r:gz').extractall(dest_directory)
def read_cifar(filenames, cifar_classnum):
assert cifar_classnum == 10 or cifar_classnum == 100
ret = []
for fname in filenames:
fo = open(fname, 'rb')
dic = pickle.load(fo, encoding='bytes')
data = dic[b'data']
if cifar_classnum == 10:
label = dic[b'labels']
IMG_NUM = 10000 # cifar10 data are split into blocks of 10000
else:
label = dic[b'fine_labels']
IMG_NUM = 50000 if 'train' in fname else 10000
fo.close()
for k in range(IMG_NUM):
img = data[k].reshape(3, 32, 32)
img = np.transpose(img, [1, 2, 0])
ret.append([img, label[k]])
return ret
def get_filenames(dir, cifar_classnum):
assert cifar_classnum == 10 or cifar_classnum == 100
if cifar_classnum == 10:
train_files = [os.path.join(
dir, 'cifar-10-batches-py', 'data_batch_%d' % i) for i in range(1, 6)]
test_files = [os.path.join(
dir, 'cifar-10-batches-py', 'test_batch')]
meta_file = os.path.join(dir, 'cifar-10-batches-py', 'batches.meta')
elif cifar_classnum == 100:
train_files = [os.path.join(dir, 'cifar-100-python', 'train')]
test_files = [os.path.join(dir, 'cifar-100-python', 'test')]
meta_file = os.path.join(dir, 'cifar-100-python', 'meta')
return train_files, test_files, meta_file
def _parse_meta(filename, cifar_classnum):
with open(filename, 'rb') as f:
obj = pickle.load(f)
return obj['label_names' if cifar_classnum == 10 else 'fine_label_names']
class CifarBase(RNGDataFlow):
"""
Produces [image, label] in Cifar10/100 dataset,
image is 32x32x3 in the range [0,255].
label is an int.
"""
def __init__(self, train_or_test, shuffle=None, dir=None, cifar_classnum=10):
"""
Args:
train_or_test (str): 'train' or 'test'
shuffle (bool): defaults to True for training set.
dir (str): path to the dataset directory
cifar_classnum (int): 10 or 100
"""
assert train_or_test in ['train', 'test']
assert cifar_classnum == 10 or cifar_classnum == 100
self.cifar_classnum = cifar_classnum
if dir is None:
dir = get_dataset_path('cifar{}_data'.format(cifar_classnum))
maybe_download_and_extract(dir, self.cifar_classnum)
train_files, test_files, meta_file = get_filenames(dir, cifar_classnum)
if train_or_test == 'train':
self.fs = train_files
else:
self.fs = test_files
for f in self.fs:
if not os.path.isfile(f):
raise ValueError('Failed to find file: ' + f)
self._label_names = _parse_meta(meta_file, cifar_classnum)
self.train_or_test = train_or_test
self.data = read_cifar(self.fs, cifar_classnum)
self.dir = dir
if shuffle is None:
shuffle = train_or_test == 'train'
self.shuffle = shuffle
def __len__(self):
return 50000 if self.train_or_test == 'train' else 10000
def __iter__(self):
idxs = np.arange(len(self.data))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
# since cifar is quite small, just do it for safety
yield self.data[k]
def get_per_pixel_mean(self, names=('train', 'test')):
"""
Args:
names (tuple[str]): the names ('train' or 'test') of the datasets
Returns:
a mean image of all images in the given datasets, with size 32x32x3
"""
for name in names:
assert name in ['train', 'test'], name
train_files, test_files, _ = get_filenames(self.dir, self.cifar_classnum)
all_files = []
if 'train' in names:
all_files.extend(train_files)
if 'test' in names:
all_files.extend(test_files)
all_imgs = [x[0] for x in read_cifar(all_files, self.cifar_classnum)]
arr = np.array(all_imgs, dtype='float32')
mean = np.mean(arr, axis=0)
return mean
def get_label_names(self):
"""
Returns:
[str]: name of each class.
"""
return self._label_names
def get_per_channel_mean(self, names=('train', 'test')):
"""
Args:
names (tuple[str]): the names ('train' or 'test') of the datasets
Returns:
An array of three values as mean of each channel, for all images in the given datasets.
"""
mean = self.get_per_pixel_mean(names)
return np.mean(mean, axis=(0, 1))
class Cifar10(CifarBase):
"""
Produces [image, label] in Cifar10 dataset,
image is 32x32x3 in the range [0,255].
label is an int.
"""
def __init__(self, train_or_test, shuffle=None, dir=None):
"""
Args:
train_or_test (str): either 'train' or 'test'.
shuffle (bool): shuffle the dataset, default to shuffle in training
"""
super(Cifar10, self).__init__(train_or_test, shuffle, dir, 10)
class Cifar100(CifarBase):
""" Similar to Cifar10"""
def __init__(self, train_or_test, shuffle=None, dir=None):
super(Cifar100, self).__init__(train_or_test, shuffle, dir, 100)
if __name__ == '__main__':
ds = Cifar10('train')
mean = ds.get_per_channel_mean()
print(mean)
import cv2
ds.reset_state()
for i, dp in enumerate(ds):
if i == 100:
break
img = dp[0]
cv2.imwrite("{:04d}.jpg".format(i), img)
| 6,964 | 33.310345 | 99 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/dataset/mnist.py | # -*- coding: utf-8 -*-
# File: mnist.py
import gzip
import numpy
import os
from ...utils import logger
from ...utils.fs import download, get_dataset_path
from ..base import RNGDataFlow
__all__ = ['Mnist', 'FashionMnist']
def maybe_download(url, work_directory):
"""Download the data from Yann's website, unless it's already here."""
filename = url.split('/')[-1]
filepath = os.path.join(work_directory, filename)
if not os.path.exists(filepath):
logger.info("Downloading to {}...".format(filepath))
download(url, work_directory)
return filepath
def _read32(bytestream):
dt = numpy.dtype(numpy.uint32).newbyteorder('>')
return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
def extract_images(filename):
"""Extract the images into a 4D uint8 numpy array [index, y, x, depth]."""
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError(
'Invalid magic number %d in MNIST image file: %s' %
(magic, filename))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
data = numpy.frombuffer(buf, dtype=numpy.uint8)
data = data.reshape(num_images, rows, cols, 1)
data = data.astype('float32') / 255.0
return data
def extract_labels(filename):
"""Extract the labels into a 1D uint8 numpy array [index]."""
with gzip.open(filename) as bytestream:
magic = _read32(bytestream)
if magic != 2049:
raise ValueError(
'Invalid magic number %d in MNIST label file: %s' %
(magic, filename))
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = numpy.frombuffer(buf, dtype=numpy.uint8)
return labels
class Mnist(RNGDataFlow):
"""
Produces [image, label] in MNIST dataset,
image is 28x28 in the range [0,1], label is an int.
"""
_DIR_NAME = 'mnist_data'
_SOURCE_URL = 'http://yann.lecun.com/exdb/mnist/'
def __init__(self, train_or_test, shuffle=True, dir=None):
"""
Args:
train_or_test (str): either 'train' or 'test'
shuffle (bool): shuffle the dataset
"""
if dir is None:
dir = get_dataset_path(self._DIR_NAME)
assert train_or_test in ['train', 'test']
self.train_or_test = train_or_test
self.shuffle = shuffle
def get_images_and_labels(image_file, label_file):
f = maybe_download(self._SOURCE_URL + image_file, dir)
images = extract_images(f)
f = maybe_download(self._SOURCE_URL + label_file, dir)
labels = extract_labels(f)
assert images.shape[0] == labels.shape[0]
return images, labels
if self.train_or_test == 'train':
self.images, self.labels = get_images_and_labels(
'train-images-idx3-ubyte.gz',
'train-labels-idx1-ubyte.gz')
else:
self.images, self.labels = get_images_and_labels(
't10k-images-idx3-ubyte.gz',
't10k-labels-idx1-ubyte.gz')
def __len__(self):
return self.images.shape[0]
def __iter__(self):
idxs = list(range(self.__len__()))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
img = self.images[k].reshape((28, 28))
label = self.labels[k]
yield [img, label]
class FashionMnist(Mnist):
"""
Same API as :class:`Mnist`, but more fashion.
"""
_DIR_NAME = 'fashion_mnist_data'
_SOURCE_URL = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
def get_label_names(self):
"""
Returns:
[str]: the name of each class
"""
# copied from https://github.com/zalandoresearch/fashion-mnist
return ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
if __name__ == '__main__':
ds = Mnist('train')
ds.reset_state()
for _ in ds:
from IPython import embed
embed()
break
| 4,301 | 29.94964 | 79 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/base.py | # -*- coding: utf-8 -*-
# File: base.py
import os
import inspect
import pprint
from collections import namedtuple
import weakref
from ...utils.argtools import log_once
from ...utils.utils import get_rng
from ...utils.develop import deprecated
from ..image import check_dtype
# Cannot import here if we want to keep backward compatibility.
# Because this causes circular dependency
# from .transform import TransformList, PhotometricTransform, TransformFactory
__all__ = ['Augmentor', 'ImageAugmentor', 'AugmentorList', 'PhotometricAugmentor']
def _reset_augmentor_after_fork(aug_ref):
aug = aug_ref()
if aug:
aug.reset_state()
def _default_repr(self):
"""
Produce something like:
"imgaug.MyAugmentor(field1={self.field1}, field2={self.field2})"
It assumes that the instance `self` contains attributes that match its constructor.
"""
classname = type(self).__name__
argspec = inspect.getfullargspec(self.__init__)
assert argspec.varargs is None, "The default __repr__ in {} doesn't work for varargs!".format(classname)
assert argspec.varkw is None, "The default __repr__ in {} doesn't work for kwargs!".format(classname)
defaults = {}
fields = argspec.args[1:]
defaults_pos = argspec.defaults
if defaults_pos is not None:
for f, d in zip(fields[::-1], defaults_pos[::-1]):
defaults[f] = d
for k in argspec.kwonlyargs:
fields.append(k)
if k in argspec.kwonlydefaults:
defaults[k] = argspec.kwonlydefaults[k]
argstr = []
for f in fields:
assert hasattr(self, f), \
"Attribute {} in {} not found! Default __repr__ only works if " \
"the instance has attributes that match the constructor.".format(f, classname)
attr = getattr(self, f)
if f in defaults and attr is defaults[f]:
continue
argstr.append("{}={}".format(f, pprint.pformat(attr)))
return "imgaug.{}({})".format(classname, ', '.join(argstr))
ImagePlaceholder = namedtuple("ImagePlaceholder", ["shape"])
class ImageAugmentor(object):
"""
Base class for an augmentor
ImageAugmentor should take images of type uint8 in range [0, 255], or
floating point images in range [0, 1] or [0, 255].
Attributes:
rng: a numpy :class:`RandomState`
"""
def __init__(self):
self.reset_state()
# only available on Unix after Python 3.7
if hasattr(os, 'register_at_fork'):
os.register_at_fork(
after_in_child=lambda: _reset_augmentor_after_fork(weakref.ref(self)))
def _init(self, params=None):
if params:
for k, v in params.items():
if k != 'self' and not k.startswith('_'):
setattr(self, k, v)
def reset_state(self):
"""
Reset rng and other state of the augmentor.
Similar to :meth:`DataFlow.reset_state`, the caller of Augmentor
is responsible for calling this method (once or more times) in the **process that uses the augmentor**
before using it.
If you use a built-in augmentation dataflow (:class:`AugmentImageComponent`, etc),
this method will be called in the dataflow's own `reset_state` method.
If you use Python≥3.7 on Unix, this method will be automatically called after fork,
and you do not need to bother calling it.
"""
self.rng = get_rng(self)
def _rand_range(self, low=1.0, high=None, size=None):
"""
Generate uniform float random number between low and high using `self.rng`.
"""
if high is None:
low, high = 0, low
if size is None:
size = []
return self.rng.uniform(low, high, size).astype("float32")
def __str__(self):
try:
return _default_repr(self)
except AssertionError as e:
log_once(e.args[0], 'warn')
return super(Augmentor, self).__repr__()
__repr__ = __str__
def get_transform(self, img):
"""
Instantiate a :class:`Transform` object to be used given the input image.
Subclasses should implement this method.
        The :class:`ImageAugmentor` often has random policies which generate a deterministic transform.
Any of those random policies should happen inside this method and instantiate
an actual deterministic transform to be performed.
The returned :class:`Transform` object should perform deterministic transforms
through its :meth:`apply_*` method.
In this way, the returned :class:`Transform` object can be used to transform not only the
input image, but other images or coordinates associated with the image.
Args:
img (ndarray): see notes of this class on the requirements.
Returns:
Transform
"""
# This should be an abstract method
# But we provide an implementation that uses the old interface,
# for backward compatibility
log_once("The old augmentor interface was deprecated. "
"Please implement {} with `get_transform` instead!".format(self.__class__.__name__),
"warning")
def legacy_augment_coords(self, coords, p):
try:
return self._augment_coords(coords, p)
except AttributeError:
pass
try:
return self.augment_coords(coords, p)
except AttributeError:
pass
return coords # this is the old default
p = None # the default return value for this method
try:
p = self._get_augment_params(img)
except AttributeError:
pass
try:
p = self.get_augment_params(img)
except AttributeError:
pass
from .transform import BaseTransform, TransformFactory
if isinstance(p, BaseTransform): # some old augs return Transform already
return p
return TransformFactory(name="LegacyConversion -- " + str(self),
apply_image=lambda img: self._augment(img, p),
apply_coords=lambda coords: legacy_augment_coords(self, coords, p))
def augment(self, img):
"""
Create a transform, and apply it to augment the input image.
This can save you one line of code, when you only care the augmentation of "one image".
It will not return the :class:`Transform` object to you
so you won't be able to apply the same transformation on
other data associated with the image.
Args:
img (ndarray): see notes of this class on the requirements.
Returns:
img: augmented image.
"""
check_dtype(img)
t = self.get_transform(img)
return t.apply_image(img)
# ###########################
# Legacy interfaces:
# ###########################
@deprecated("Please use `get_transform` instead!", "2020-06-06", max_num_warnings=3)
def augment_return_params(self, d):
t = self.get_transform(d)
return t.apply_image(d), t
@deprecated("Please use `transform.apply_image` instead!", "2020-06-06", max_num_warnings=3)
def augment_with_params(self, d, param):
return param.apply_image(d)
@deprecated("Please use `transform.apply_coords` instead!", "2020-06-06", max_num_warnings=3)
def augment_coords(self, coords, param):
return param.apply_coords(coords)
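# Hedged usage sketch (added for illustration; not part of the original file):
# a minimal custom augmentor following the `get_transform` contract documented above.
# All randomness happens inside `get_transform`, which returns a deterministic Transform
# that can be reused on the image and on associated coordinates.  `MyRandomResize`,
# `img` and `pts` are hypothetical names used only in this sketch.
#
#   import cv2
#   from .transform import ResizeTransform
#
#   class MyRandomResize(ImageAugmentor):
#       def __init__(self, scale_range=(0.8, 1.2)):
#           super(MyRandomResize, self).__init__()
#           self._init(locals())
#
#       def get_transform(self, img):
#           h, w = img.shape[:2]
#           scale = self._rand_range(*self.scale_range)   # randomness lives here
#           return ResizeTransform(h, w, int(h * scale + 0.5), int(w * scale + 0.5),
#                                  cv2.INTER_LINEAR)
#
#   aug = MyRandomResize()
#   aug.reset_state()
#   tfm = aug.get_transform(img)      # one deterministic transform ...
#   img_out = tfm.apply_image(img)    # ... applied to the image
#   pts_out = tfm.apply_coords(pts)   # ... and to Nx2 (x, y) coordinates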
class AugmentorList(ImageAugmentor):
"""
Augment an image by a list of augmentors
"""
def __init__(self, augmentors):
"""
Args:
augmentors (list): list of :class:`ImageAugmentor` instance to be applied.
"""
assert isinstance(augmentors, (list, tuple)), augmentors
self.augmentors = augmentors
super(AugmentorList, self).__init__()
def reset_state(self):
""" Will reset state of each augmentor """
super(AugmentorList, self).reset_state()
for a in self.augmentors:
a.reset_state()
def get_transform(self, img):
check_dtype(img)
assert img.ndim in [2, 3], img.ndim
from .transform import LazyTransform, TransformList
# The next augmentor requires the previous one to finish.
# So we have to use LazyTransform
tfms = []
for idx, a in enumerate(self.augmentors):
if idx == 0:
t = a.get_transform(img)
else:
t = LazyTransform(a.get_transform)
if isinstance(t, TransformList):
tfms.extend(t.tfms)
else:
tfms.append(t)
return TransformList(tfms)
def __str__(self):
repr_each_aug = ",\n".join([" " + repr(x) for x in self.augmentors])
return "imgaug.AugmentorList([\n{}])".format(repr_each_aug)
__repr__ = __str__
Augmentor = ImageAugmentor
"""
Legacy name. Augmentor and ImageAugmentor are now the same thing.
"""
class PhotometricAugmentor(ImageAugmentor):
"""
A base class for ImageAugmentor which only affects pixels.
    Subclasses should implement `_get_augment_params(img)` and `_augment(img, params)`.
"""
def get_transform(self, img):
p = self._get_augment_params(img)
from .transform import PhotometricTransform
return PhotometricTransform(func=lambda img: self._augment(img, p),
name="from " + str(self))
def _get_augment_params(self, _):
return None
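# Hedged usage sketch (added for illustration; not part of the original file):
# a pixel-only augmentor only needs the two hooks used by PhotometricAugmentor above.
# `MyRandomBrightness` is a hypothetical name used only in this sketch.
#
#   import numpy as np
#
#   class MyRandomBrightness(PhotometricAugmentor):
#       def __init__(self, delta=20):
#           super(MyRandomBrightness, self).__init__()
#           self._init(locals())
#
#       def _get_augment_params(self, img):
#           return self._rand_range(-self.delta, self.delta)
#
#       def _augment(self, img, delta):
#           old_dtype = img.dtype
#           return np.clip(img.astype('float32') + delta, 0, 255).astype(old_dtype)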
| 9,656 | 32.765734 | 110 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/imgaug_test.py | # -*- coding: utf-8 -*-
# File: _test.py
import sys
import numpy as np
import cv2
import unittest
from .base import ImageAugmentor, AugmentorList
from .imgproc import Contrast
from .noise import SaltPepperNoise
from .misc import Flip, Resize
def _rand_image(shape=(20, 20)):
return np.random.rand(*shape).astype("float32")
class LegacyBrightness(ImageAugmentor):
def __init__(self, delta, clip=True):
super(LegacyBrightness, self).__init__()
assert delta > 0
self._init(locals())
def _get_augment_params(self, _):
v = self._rand_range(-self.delta, self.delta)
return v
def _augment(self, img, v):
old_dtype = img.dtype
img = img.astype('float32')
img += v
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class LegacyFlip(ImageAugmentor):
def __init__(self, horiz=False, vert=False, prob=0.5):
super(LegacyFlip, self).__init__()
if horiz and vert:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
elif horiz:
self.code = 1
elif vert:
self.code = 0
else:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def _get_augment_params(self, img):
h, w = img.shape[:2]
do = self._rand_range() < self.prob
return (do, h, w)
def _augment(self, img, param):
do, _, _ = param
if do:
ret = cv2.flip(img, self.code)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
else:
ret = img
return ret
def _augment_coords(self, coords, param):
do, h, w = param
if do:
if self.code == 0:
coords[:, 1] = h - coords[:, 1]
elif self.code == 1:
coords[:, 0] = w - coords[:, 0]
return coords
class ImgAugTest(unittest.TestCase):
def _get_augs(self):
return AugmentorList([
Contrast((0.8, 1.2)),
Flip(horiz=True),
Resize((30, 30)),
SaltPepperNoise()
])
def _get_augs_with_legacy(self):
return AugmentorList([
LegacyBrightness(0.5),
LegacyFlip(horiz=True),
Resize((30, 30)),
SaltPepperNoise()
])
def test_augmentors(self):
augmentors = self._get_augs()
img = _rand_image()
orig = img.copy()
tfms = augmentors.get_transform(img)
# test printing
print(augmentors)
print(tfms)
newimg = tfms.apply_image(img)
print(tfms) # lazy ones will instantiate after the first apply
newimg2 = tfms.apply_image(orig)
self.assertTrue(np.allclose(newimg, newimg2))
self.assertEqual(newimg2.shape[0], 30)
coords = np.asarray([[0, 0], [10, 12]], dtype="float32")
tfms.apply_coords(coords)
def test_legacy_usage(self):
augmentors = self._get_augs()
img = _rand_image()
orig = img.copy()
newimg, tfms = augmentors.augment_return_params(img)
newimg2 = augmentors.augment_with_params(orig, tfms)
self.assertTrue(np.allclose(newimg, newimg2))
self.assertEqual(newimg2.shape[0], 30)
coords = np.asarray([[0, 0], [10, 12]], dtype="float32")
augmentors.augment_coords(coords, tfms)
def test_legacy_augs_new_usage(self):
augmentors = self._get_augs_with_legacy()
img = _rand_image()
orig = img.copy()
tfms = augmentors.get_transform(img)
newimg = tfms.apply_image(img)
newimg2 = tfms.apply_image(orig)
self.assertTrue(np.allclose(newimg, newimg2))
self.assertEqual(newimg2.shape[0], 30)
coords = np.asarray([[0, 0], [10, 12]], dtype="float32")
tfms.apply_coords(coords)
def test_legacy_augs_legacy_usage(self):
augmentors = self._get_augs_with_legacy()
img = _rand_image()
orig = img.copy()
newimg, tfms = augmentors.augment_return_params(img)
newimg2 = augmentors.augment_with_params(orig, tfms)
self.assertTrue(np.allclose(newimg, newimg2))
self.assertEqual(newimg2.shape[0], 30)
coords = np.asarray([[0, 0], [10, 12]], dtype="float32")
augmentors.augment_coords(coords, tfms)
if __name__ == '__main__':
anchors = [(0.2, 0.2), (0.7, 0.2), (0.8, 0.8), (0.5, 0.5), (0.2, 0.5)]
augmentors = AugmentorList([
Contrast((0.8, 1.2)),
Flip(horiz=True),
# RandomCropRandomShape(0.3),
SaltPepperNoise()
])
img = cv2.imread(sys.argv[1])
    newimg, prms = augmentors.augment_return_params(img)
cv2.imshow(" ", newimg.astype('uint8'))
cv2.waitKey()
    newimg = augmentors.augment_with_params(img, prms)
cv2.imshow(" ", newimg.astype('uint8'))
cv2.waitKey()
| 5,002 | 27.919075 | 91 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/crop.py | # -*- coding: utf-8 -*-
# File: crop.py
import numpy as np
import cv2
from ...utils.argtools import shape2d
from ...utils.develop import log_deprecated
from .base import ImageAugmentor, ImagePlaceholder
from .transform import CropTransform, TransformList, ResizeTransform, PhotometricTransform
from .misc import ResizeShortestEdge
__all__ = ['RandomCrop', 'CenterCrop', 'RandomCropRandomShape',
'GoogleNetRandomCropAndResize', 'RandomCutout']
class RandomCrop(ImageAugmentor):
""" Randomly crop the image into a smaller one """
def __init__(self, crop_shape):
"""
Args:
crop_shape: (h, w), int or a tuple of int
"""
crop_shape = shape2d(crop_shape)
crop_shape = (int(crop_shape[0]), int(crop_shape[1]))
super(RandomCrop, self).__init__()
self._init(locals())
def get_transform(self, img):
orig_shape = img.shape
assert orig_shape[0] >= self.crop_shape[0] \
and orig_shape[1] >= self.crop_shape[1], orig_shape
diffh = orig_shape[0] - self.crop_shape[0]
h0 = self.rng.randint(diffh + 1)
diffw = orig_shape[1] - self.crop_shape[1]
w0 = self.rng.randint(diffw + 1)
return CropTransform(h0, w0, self.crop_shape[0], self.crop_shape[1])
class CenterCrop(ImageAugmentor):
""" Crop the image at the center"""
def __init__(self, crop_shape):
"""
Args:
crop_shape: (h, w) tuple or a int
"""
crop_shape = shape2d(crop_shape)
self._init(locals())
def get_transform(self, img):
orig_shape = img.shape
assert orig_shape[0] >= self.crop_shape[0] \
and orig_shape[1] >= self.crop_shape[1], orig_shape
h0 = int((orig_shape[0] - self.crop_shape[0]) * 0.5)
w0 = int((orig_shape[1] - self.crop_shape[1]) * 0.5)
return CropTransform(h0, w0, self.crop_shape[0], self.crop_shape[1])
class RandomCropRandomShape(ImageAugmentor):
""" Random crop with a random shape"""
def __init__(self, wmin, hmin,
wmax=None, hmax=None,
max_aspect_ratio=None):
"""
Randomly crop a box of shape (h, w), sampled from [min, max] (both inclusive).
If max is None, will use the input image shape.
Args:
wmin, hmin, wmax, hmax: range to sample shape.
max_aspect_ratio (float): this argument has no effect and is deprecated.
"""
super(RandomCropRandomShape, self).__init__()
if max_aspect_ratio is not None:
log_deprecated("RandomCropRandomShape(max_aspect_ratio)", "It is never implemented!", "2020-06-06")
self._init(locals())
def get_transform(self, img):
hmax = self.hmax or img.shape[0]
wmax = self.wmax or img.shape[1]
h = self.rng.randint(self.hmin, hmax + 1)
w = self.rng.randint(self.wmin, wmax + 1)
diffh = img.shape[0] - h
diffw = img.shape[1] - w
assert diffh >= 0 and diffw >= 0, str(diffh) + ", " + str(diffw)
y0 = 0 if diffh == 0 else self.rng.randint(diffh)
x0 = 0 if diffw == 0 else self.rng.randint(diffw)
return CropTransform(y0, x0, h, w)
class GoogleNetRandomCropAndResize(ImageAugmentor):
"""
The random crop and resize augmentation proposed in
Sec. 6 of "Going Deeper with Convolutions" by Google.
This implementation follows the details in ``fb.resnet.torch``.
It attempts to crop a random rectangle with 8%~100% area of the original image,
and keep the aspect ratio between 3/4 to 4/3. Then it resize this crop to the target shape.
If such crop cannot be found in 10 iterations, it will do a ResizeShortestEdge + CenterCrop.
"""
def __init__(self, crop_area_fraction=(0.08, 1.),
aspect_ratio_range=(0.75, 1.333),
target_shape=224, interp=cv2.INTER_LINEAR):
"""
Args:
crop_area_fraction (tuple(float)): Defaults to crop 8%-100% area.
aspect_ratio_range (tuple(float)): Defaults to make aspect ratio in 3/4-4/3.
target_shape (int): Defaults to 224, the standard ImageNet image shape.
"""
super(GoogleNetRandomCropAndResize, self).__init__()
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
area = h * w
for _ in range(10):
targetArea = self.rng.uniform(*self.crop_area_fraction) * area
aspectR = self.rng.uniform(*self.aspect_ratio_range)
ww = int(np.sqrt(targetArea * aspectR) + 0.5)
hh = int(np.sqrt(targetArea / aspectR) + 0.5)
if self.rng.uniform() < 0.5:
ww, hh = hh, ww
if hh <= h and ww <= w:
x1 = self.rng.randint(0, w - ww + 1)
y1 = self.rng.randint(0, h - hh + 1)
return TransformList([
CropTransform(y1, x1, hh, ww),
ResizeTransform(hh, ww, self.target_shape, self.target_shape, interp=self.interp)
])
resize = ResizeShortestEdge(self.target_shape, interp=self.interp).get_transform(img)
out_shape = (resize.new_h, resize.new_w)
crop = CenterCrop(self.target_shape).get_transform(ImagePlaceholder(shape=out_shape))
return TransformList([resize, crop])
class RandomCutout(ImageAugmentor):
"""
The cutout augmentation, as described in https://arxiv.org/abs/1708.04552
"""
def __init__(self, h_range, w_range, fill=0.):
"""
Args:
h_range (int or tuple): the height of rectangle to cut.
If a tuple, will randomly sample from this range [low, high)
w_range (int or tuple): similar to above
fill (float): the fill value
"""
super(RandomCutout, self).__init__()
self._init(locals())
def _get_cutout_shape(self):
if isinstance(self.h_range, int):
h = self.h_range
else:
            h = self.rng.randint(*self.h_range)   # sample from [low, high)
if isinstance(self.w_range, int):
w = self.w_range
else:
            w = self.rng.randint(*self.w_range)   # sample from [low, high)
return h, w
@staticmethod
def _cutout(img, y0, x0, h, w, fill):
img[y0:y0 + h, x0:x0 + w] = fill
return img
def get_transform(self, img):
h, w = self._get_cutout_shape()
x0 = self.rng.randint(0, img.shape[1] + 1 - w)
y0 = self.rng.randint(0, img.shape[0] + 1 - h)
return PhotometricTransform(
lambda img: RandomCutout._cutout(img, y0, x0, h, w, self.fill),
"cutout")
| 6,674 | 36.711864 | 111 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/transform.py | # -*- coding: utf-8 -*-
# File: transform.py
import numpy as np
import cv2
from ...utils.argtools import log_once
from .base import ImageAugmentor, _default_repr
TransformAugmentorBase = ImageAugmentor
"""
Legacy alias. Please don't use.
"""
# This legacy augmentor requires us to import base from here, causing circular dependency.
# Should remove this in the future.
__all__ = ["Transform", "ResizeTransform", "CropTransform", "FlipTransform",
"TransformList", "TransformFactory"]
# class WrappedImgFunc(object):
# def __init__(self, func, need_float=False, cast_back=True, fix_ndim=True):
# self.func = func
# self.need_float = need_float
# self.cast_back = cast_back
# def __call__(self, img):
# old_dtype = img.dtype
# old_ndim = img.ndim
# if self.need_float:
# img = img.astype("float32")
# img = self.func(img)
# if self.cast_back and old_dtype == np.uint8 and img.dtype != np.uint8:
# img = np.clip(img, 0, 255.)
# if self.cast_back:
# img = img.astype(old_dtype)
# if self.fix_ndim and old_ndim == 3 and img.ndim == 2:
# img = img[:, :, np.newaxis]
# return img
class BaseTransform(object):
"""
Base class for all transforms, for type-check only.
Users should never interact with this class.
"""
def _init(self, params=None):
if params:
for k, v in params.items():
if k != 'self' and not k.startswith('_'):
setattr(self, k, v)
class Transform(BaseTransform):
"""
A deterministic image transformation, used to implement
the (probably random) augmentors.
This class is also the place to provide a default implementation to any
:meth:`apply_xxx` method.
The current default is to raise NotImplementedError in any such methods.
All subclasses should implement `apply_image`.
The image should be of type uint8 in range [0, 255], or
floating point images in range [0, 1] or [0, 255]
Some subclasses may implement `apply_coords`, when applicable.
It should take and return a numpy array of Nx2, where each row is the (x, y) coordinate.
The implementation of each method may choose to modify its input data
in-place for efficient transformation.
"""
def __init__(self):
# provide an empty __init__, so that __repr__ will work nicely
pass
def __getattr__(self, name):
if name.startswith("apply_"):
def f(x):
raise NotImplementedError("{} does not implement method {}".format(self.__class__.__name__, name))
return f
raise AttributeError("Transform object has no attribute {}".format(name))
def __repr__(self):
try:
return _default_repr(self)
except AssertionError as e:
log_once(e.args[0], 'warn')
return super(Transform, self).__repr__()
__str__ = __repr__
class ResizeTransform(Transform):
"""
Resize the image.
"""
def __init__(self, h, w, new_h, new_w, interp):
"""
Args:
h, w (int):
new_h, new_w (int):
interp (int): cv2 interpolation method
"""
super(ResizeTransform, self).__init__()
self._init(locals())
def apply_image(self, img):
assert img.shape[:2] == (self.h, self.w)
ret = cv2.resize(
img, (self.new_w, self.new_h),
interpolation=self.interp)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
return ret
def apply_coords(self, coords):
coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
return coords
class CropTransform(Transform):
"""
Crop a subimage from an image.
"""
def __init__(self, y0, x0, h, w):
super(CropTransform, self).__init__()
self._init(locals())
def apply_image(self, img):
return img[self.y0:self.y0 + self.h, self.x0:self.x0 + self.w]
def apply_coords(self, coords):
coords[:, 0] -= self.x0
coords[:, 1] -= self.y0
return coords
class WarpAffineTransform(Transform):
def __init__(self, mat, dsize, interp=cv2.INTER_LINEAR,
borderMode=cv2.BORDER_CONSTANT, borderValue=0):
super(WarpAffineTransform, self).__init__()
self._init(locals())
def apply_image(self, img):
ret = cv2.warpAffine(img, self.mat, self.dsize,
flags=self.interp,
borderMode=self.borderMode,
borderValue=self.borderValue)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
return ret
def apply_coords(self, coords):
coords = np.concatenate((coords, np.ones((coords.shape[0], 1), dtype='f4')), axis=1)
coords = np.dot(coords, self.mat.T)
return coords
class FlipTransform(Transform):
"""
Flip the image.
"""
def __init__(self, h, w, horiz=True):
"""
Args:
h, w (int):
horiz (bool): whether to flip horizontally or vertically.
"""
self._init(locals())
def apply_image(self, img):
if self.horiz:
return img[:, ::-1]
else:
return img[::-1]
def apply_coords(self, coords):
if self.horiz:
coords[:, 0] = self.w - coords[:, 0]
else:
coords[:, 1] = self.h - coords[:, 1]
return coords
class TransposeTransform(Transform):
"""
Transpose the image.
"""
def apply_image(self, img):
ret = cv2.transpose(img)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
return ret
def apply_coords(self, coords):
return coords[:, ::-1]
class NoOpTransform(Transform):
"""
A Transform that does nothing.
"""
def __getattr__(self, name):
if name.startswith("apply_"):
return lambda x: x
raise AttributeError("NoOpTransform object has no attribute {}".format(name))
class PhotometricTransform(Transform):
"""
A transform which only has `apply_image` but does nothing in `apply_coords`.
"""
def __init__(self, func, name=None):
"""
Args:
func (img -> img): a function to be used for :meth:`apply_image`
name (str, optional): the name of this transform
"""
self._func = func
self._name = name
def apply_image(self, img):
return self._func(img)
def apply_coords(self, coords):
return coords
def __repr__(self):
return "imgaug.PhotometricTransform({})".format(self._name if self._name else "")
__str__ = __repr__
class TransformFactory(Transform):
"""
Create a :class:`Transform` from user-provided functions.
"""
def __init__(self, name=None, **kwargs):
"""
Args:
name (str, optional): the name of this transform
**kwargs: mapping from `'apply_xxx'` to implementation of such functions.
"""
for k, v in kwargs.items():
if k.startswith('apply_'):
setattr(self, k, v)
else:
raise KeyError("Unknown argument '{}' in TransformFactory!".format(k))
self._name = name
def __str__(self):
return "imgaug.TransformFactory({})".format(self._name if self._name else "")
__repr__ = __str__
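# Hedged usage sketch (added for illustration; not part of the original file):
# TransformFactory wraps plain functions into a Transform without subclassing.
# The gamma value and the `img` variable are hypothetical, used only in this sketch.
#
#   import numpy as np
#
#   gamma_tfm = TransformFactory(
#       name="gamma=2.2",
#       apply_image=lambda img: np.power(img.astype('float32') / 255.0, 2.2) * 255.0,
#       apply_coords=lambda coords: coords,   # photometric-only: coordinates unchanged
#   )
#   out = gamma_tfm.apply_image(img)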
"""
Some meta-transforms:
they do not perform actual transformation, but delegate to another Transform.
"""
class TransformList(BaseTransform):
"""
Apply a list of transforms sequentially.
"""
def __init__(self, tfms):
"""
Args:
tfms (list[Transform]):
"""
for t in tfms:
assert isinstance(t, BaseTransform), t
self.tfms = tfms
def _apply(self, x, meth):
for t in self.tfms:
x = getattr(t, meth)(x)
return x
def __getattr__(self, name):
if name.startswith("apply_"):
return lambda x: self._apply(x, name)
raise AttributeError("TransformList object has no attribute {}".format(name))
def __str__(self):
repr_each_tfm = ",\n".join([" " + repr(x) for x in self.tfms])
return "imgaug.TransformList([\n{}])".format(repr_each_tfm)
def __add__(self, other):
other = other.tfms if isinstance(other, TransformList) else [other]
return TransformList(self.tfms + other)
def __iadd__(self, other):
other = other.tfms if isinstance(other, TransformList) else [other]
self.tfms.extend(other)
return self
def __radd__(self, other):
other = other.tfms if isinstance(other, TransformList) else [other]
return TransformList(other + self.tfms)
__repr__ = __str__
class LazyTransform(BaseTransform):
"""
A transform that's instantiated at the first call to `apply_image`.
"""
def __init__(self, get_transform):
"""
Args:
get_transform (img -> Transform): a function which will be used to instantiate a Transform.
"""
self.get_transform = get_transform
self._transform = None
def apply_image(self, img):
if not self._transform:
self._transform = self.get_transform(img)
return self._transform.apply_image(img)
def _apply(self, x, meth):
assert self._transform is not None, \
"LazyTransform.{} can only be called after the transform has been applied on an image!"
return getattr(self._transform, meth)(x)
def __getattr__(self, name):
if name.startswith("apply_"):
return lambda x: self._apply(x, name)
raise AttributeError("TransformList object has no attribute {}".format(name))
def __repr__(self):
if self._transform is None:
return "LazyTransform(get_transform={})".format(str(self.get_transform))
else:
return repr(self._transform)
__str__ = __repr__
def apply_coords(self, coords):
return self._apply(coords, "apply_coords")
if __name__ == '__main__':
shape = (100, 100)
center = (10, 70)
mat = cv2.getRotationMatrix2D(center, 20, 1)
trans = WarpAffineTransform(mat, (130, 130))
def draw_points(img, pts):
for p in pts:
try:
img[int(p[1]), int(p[0])] = 0
except IndexError:
pass
image = cv2.imread('cat.jpg')
image = cv2.resize(image, shape)
orig_image = image.copy()
coords = np.random.randint(100, size=(20, 2))
draw_points(orig_image, coords)
print(coords)
for _ in range(1):
coords = trans.apply_coords(coords)
image = trans.apply_image(image)
print(coords)
draw_points(image, coords)
# viz = cv2.resize(viz, (1200, 600))
orig_image = cv2.resize(orig_image, (600, 600))
image = cv2.resize(image, (600, 600))
viz = np.concatenate((orig_image, image), axis=1)
cv2.imshow("mat", viz)
cv2.waitKey()
| 11,275 | 27.912821 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/misc.py | # -*- coding: utf-8 -*-
# File: misc.py
import cv2
from ...utils import logger
from ...utils.argtools import shape2d
from .base import ImageAugmentor
from .transform import ResizeTransform, NoOpTransform, FlipTransform, TransposeTransform
__all__ = ['Flip', 'Resize', 'RandomResize', 'ResizeShortestEdge', 'Transpose']
class Flip(ImageAugmentor):
"""
Random flip the image either horizontally or vertically.
"""
def __init__(self, horiz=False, vert=False, prob=0.5):
"""
Args:
horiz (bool): use horizontal flip.
vert (bool): use vertical flip.
prob (float): probability of flip.
"""
super(Flip, self).__init__()
if horiz and vert:
raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
if not horiz and not vert:
raise ValueError("At least one of horiz or vert has to be True!")
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
do = self._rand_range() < self.prob
if not do:
return NoOpTransform()
else:
return FlipTransform(h, w, self.horiz)
class Resize(ImageAugmentor):
""" Resize image to a target size"""
def __init__(self, shape, interp=cv2.INTER_LINEAR):
"""
Args:
shape: (h, w) tuple or a int
interp: cv2 interpolation method
"""
shape = tuple(shape2d(shape))
self._init(locals())
def get_transform(self, img):
return ResizeTransform(
img.shape[0], img.shape[1],
self.shape[0], self.shape[1], self.interp)
class ResizeShortestEdge(ImageAugmentor):
"""
Resize the shortest edge to a certain number while
keeping the aspect ratio.
"""
def __init__(self, size, interp=cv2.INTER_LINEAR):
"""
Args:
size (int): the size to resize the shortest edge to.
"""
size = int(size)
self._init(locals())
def get_transform(self, img):
h, w = img.shape[:2]
scale = self.size * 1.0 / min(h, w)
if h < w:
newh, neww = self.size, int(scale * w + 0.5)
else:
newh, neww = int(scale * h + 0.5), self.size
return ResizeTransform(h, w, newh, neww, self.interp)
class RandomResize(ImageAugmentor):
""" Randomly rescale width and height of the image."""
def __init__(self, xrange, yrange=None, minimum=(0, 0), aspect_ratio_thres=0.15,
interp=cv2.INTER_LINEAR):
"""
Args:
            xrange (tuple): a (min, max) tuple. If it is floating point, the
                tuple defines the range of the scaling ratio of the new width, e.g. (0.9, 1.2).
                If it is integer, the tuple defines the range of the new width in pixels, e.g. (200, 350).
yrange (tuple): similar to xrange, but for height. Should be None when aspect_ratio_thres==0.
minimum (tuple): (xmin, ymin) in pixels. To avoid scaling down too much.
aspect_ratio_thres (float): discard samples which change aspect ratio
larger than this threshold. Set to 0 to keep aspect ratio.
interp: cv2 interpolation method
"""
super(RandomResize, self).__init__()
assert aspect_ratio_thres >= 0
self._init(locals())
def is_float(tp):
return isinstance(tp[0], float) or isinstance(tp[1], float)
if yrange is not None:
assert is_float(xrange) == is_float(yrange), "xrange and yrange has different type!"
self._is_scale = is_float(xrange)
if aspect_ratio_thres == 0:
if self._is_scale:
assert xrange == yrange or yrange is None
else:
if yrange is not None:
logger.warn("aspect_ratio_thres==0, yrange is not used!")
def get_transform(self, img):
cnt = 0
h, w = img.shape[:2]
def get_dest_size():
if self._is_scale:
sx = self._rand_range(*self.xrange)
if self.aspect_ratio_thres == 0:
sy = sx
else:
sy = self._rand_range(*self.yrange)
destX = max(sx * w, self.minimum[0])
destY = max(sy * h, self.minimum[1])
else:
sx = self._rand_range(*self.xrange)
if self.aspect_ratio_thres == 0:
sy = sx * 1.0 / w * h
else:
sy = self._rand_range(*self.yrange)
destX = max(sx, self.minimum[0])
destY = max(sy, self.minimum[1])
return (int(destX + 0.5), int(destY + 0.5))
while True:
destX, destY = get_dest_size()
if self.aspect_ratio_thres > 0: # don't check when thres == 0
oldr = w * 1.0 / h
newr = destX * 1.0 / destY
diff = abs(newr - oldr) / oldr
if diff >= self.aspect_ratio_thres + 1e-5:
cnt += 1
if cnt > 50:
logger.warn("RandomResize failed to augment an image")
return ResizeTransform(h, w, h, w, self.interp)
continue
return ResizeTransform(h, w, destY, destX, self.interp)
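# Usage sketch (illustration only; the helper name is an assumption): the two parameterizations
# of RandomResize -- float ranges are scaling ratios, integer ranges are absolute pixel sizes.
def _demo_random_resize():
    # keep the aspect ratio: only xrange is needed and it drives both sides
    iso_aug = RandomResize(xrange=(0.8, 1.2), aspect_ratio_thres=0)
    # independent pixel ranges; samples changing the aspect ratio by more than 15% are rejected
    pix_aug = RandomResize(xrange=(200, 350), yrange=(200, 350), aspect_ratio_thres=0.15)
    return iso_aug, pix_aug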
class Transpose(ImageAugmentor):
"""
    Randomly transpose the image.
"""
def __init__(self, prob=0.5):
"""
Args:
prob (float): probability of transpose.
"""
super(Transpose, self).__init__()
self.prob = prob
def get_transform(self, _):
if self.rng.rand() < self.prob:
return TransposeTransform()
else:
return NoOpTransform()
| 5,824 | 32.866279 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/external.py | #!/usr/bin/env python
import numpy as np
from .base import ImageAugmentor
from .transform import Transform
__all__ = ['IAAugmentor', 'Albumentations']
class IAATransform(Transform):
def __init__(self, aug, img_shape):
self._init(locals())
def apply_image(self, img):
return self.aug.augment_image(img)
def apply_coords(self, coords):
import imgaug as IA
points = [IA.Keypoint(x=x, y=y) for x, y in coords]
points = IA.KeypointsOnImage(points, shape=self.img_shape)
augmented = self.aug.augment_keypoints([points])[0].keypoints
return np.asarray([[p.x, p.y] for p in augmented])
class IAAugmentor(ImageAugmentor):
"""
    Wrap an augmentor from the IAA library: https://github.com/aleju/imgaug.
Both images and coordinates are supported.
Note:
1. It's NOT RECOMMENDED
to use coordinates because the IAA library does not handle coordinates accurately.
2. Only uint8 images are supported by the IAA library.
        3. The IAA library can only produce images of the same shape.
Example:
.. code-block:: python
from imgaug import augmenters as iaa # this is the aleju/imgaug library
from tensorpack import imgaug # this is not the aleju/imgaug library
# or from dataflow import imgaug # if you're using the standalone version of dataflow
myaug = imgaug.IAAugmentor(
iaa.Sequential([
iaa.Sharpen(alpha=(0, 1), lightness=(0.75, 1.5)),
iaa.Fliplr(0.5),
iaa.Crop(px=(0, 100)),
            ]))
"""
def __init__(self, augmentor):
"""
Args:
augmentor (iaa.Augmenter):
"""
super(IAAugmentor, self).__init__()
self._aug = augmentor
def get_transform(self, img):
return IAATransform(self._aug.to_deterministic(), img.shape)
class AlbumentationsTransform(Transform):
def __init__(self, aug, param):
self._init(locals())
def apply_image(self, img):
return self.aug.apply(img, **self.param)
class Albumentations(ImageAugmentor):
"""
    Wrap an augmentor from the albumentations library: https://github.com/albu/albumentations.
Coordinate augmentation is not supported by the library.
Example:
.. code-block:: python
from tensorpack import imgaug
# or from dataflow import imgaug # if you're using the standalone version of dataflow
import albumentations as AB
myaug = imgaug.Albumentations(AB.RandomRotate90(p=1))
"""
def __init__(self, augmentor):
"""
Args:
augmentor (albumentations.BasicTransform):
"""
super(Albumentations, self).__init__()
self._aug = augmentor
def get_transform(self, img):
return AlbumentationsTransform(self._aug, self._aug.get_params())
| 2,900 | 28.602041 | 94 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/noise.py | # -*- coding: utf-8 -*-
# File: noise.py
import numpy as np
import cv2
from .base import PhotometricAugmentor
__all__ = ['JpegNoise', 'GaussianNoise', 'SaltPepperNoise']
class JpegNoise(PhotometricAugmentor):
""" Random JPEG noise. """
def __init__(self, quality_range=(40, 100)):
"""
Args:
quality_range (tuple): range to sample JPEG quality
"""
super(JpegNoise, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
return self.rng.randint(*self.quality_range)
def _augment(self, img, q):
enc = cv2.imencode('.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, q])[1]
return cv2.imdecode(enc, 1).astype(img.dtype)
class GaussianNoise(PhotometricAugmentor):
"""
Add random Gaussian noise N(0, sigma^2) of the same shape to img.
"""
def __init__(self, sigma=1, clip=True):
"""
Args:
sigma (float): stddev of the Gaussian distribution.
clip (bool): clip the result to [0,255] in the end.
"""
super(GaussianNoise, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
return self.rng.randn(*img.shape)
def _augment(self, img, noise):
old_dtype = img.dtype
ret = img + noise * self.sigma
if self.clip or old_dtype == np.uint8:
ret = np.clip(ret, 0, 255)
return ret.astype(old_dtype)
class SaltPepperNoise(PhotometricAugmentor):
""" Salt and pepper noise.
    Randomly set some elements of the image to 0 or 255, regardless of its channels.
"""
def __init__(self, white_prob=0.05, black_prob=0.05):
"""
Args:
white_prob (float), black_prob (float): probabilities setting an element to 255 or 0.
"""
assert white_prob + black_prob <= 1, "Sum of probabilities cannot be greater than 1"
super(SaltPepperNoise, self).__init__()
self._init(locals())
def _get_augment_params(self, img):
return self.rng.uniform(low=0, high=1, size=img.shape)
def _augment(self, img, param):
img[param > (1 - self.white_prob)] = 255
img[param < self.black_prob] = 0
return img
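# Usage sketch (illustration only; the helper name and parameter values are assumptions): noise
# augmentors are typically chained, and every get_transform() call samples fresh noise.
def _demo_noise_pipeline(img):
    augs = [GaussianNoise(sigma=5, clip=True),
            SaltPepperNoise(white_prob=0.02, black_prob=0.02)]
    for a in augs:
        img = a.get_transform(img).apply_image(img)
    return img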
| 2,238 | 28.077922 | 97 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/paste.py | # -*- coding: utf-8 -*-
# File: paste.py
import numpy as np
from abc import abstractmethod
from .base import ImageAugmentor
from .transform import TransformFactory
__all__ = ['CenterPaste', 'BackgroundFiller', 'ConstantBackgroundFiller',
'RandomPaste']
class BackgroundFiller(object):
""" Base class for all BackgroundFiller"""
def fill(self, background_shape, img):
"""
Return a proper background image of background_shape, given img.
Args:
background_shape (tuple): a shape (h, w)
img: an image
Returns:
a background image
"""
background_shape = tuple(background_shape)
return self._fill(background_shape, img)
@abstractmethod
def _fill(self, background_shape, img):
pass
class ConstantBackgroundFiller(BackgroundFiller):
""" Fill the background by a constant """
def __init__(self, value):
"""
Args:
value (float): the value to fill the background.
"""
self.value = value
def _fill(self, background_shape, img):
assert img.ndim in [3, 2]
if img.ndim == 3:
return_shape = background_shape + (img.shape[2],)
else:
return_shape = background_shape
return np.zeros(return_shape, dtype=img.dtype) + self.value
# NOTE:
# apply_coords should be implemented in paste transform, but not yet done
class CenterPaste(ImageAugmentor):
"""
Paste the image onto the center of a background canvas.
"""
def __init__(self, background_shape, background_filler=None):
"""
Args:
background_shape (tuple): shape of the background canvas.
background_filler (BackgroundFiller): How to fill the background. Defaults to zero-filler.
"""
if background_filler is None:
background_filler = ConstantBackgroundFiller(0)
self._init(locals())
def get_transform(self, _):
return TransformFactory(name=str(self), apply_image=lambda img: self._impl(img))
def _impl(self, img):
img_shape = img.shape[:2]
assert self.background_shape[0] >= img_shape[0] and self.background_shape[1] >= img_shape[1]
background = self.background_filler.fill(
self.background_shape, img)
y0 = int((self.background_shape[0] - img_shape[0]) * 0.5)
x0 = int((self.background_shape[1] - img_shape[1]) * 0.5)
background[y0:y0 + img_shape[0], x0:x0 + img_shape[1]] = img
return background
class RandomPaste(CenterPaste):
"""
Randomly paste the image onto a background canvas.
"""
def get_transform(self, img):
img_shape = img.shape[:2]
assert self.background_shape[0] > img_shape[0] and self.background_shape[1] > img_shape[1]
y0 = self._rand_range(self.background_shape[0] - img_shape[0])
x0 = self._rand_range(self.background_shape[1] - img_shape[1])
l = int(x0), int(y0)
return TransformFactory(name=str(self), apply_image=lambda img: self._impl(img, l))
def _impl(self, img, loc):
x0, y0 = loc
img_shape = img.shape[:2]
background = self.background_filler.fill(
self.background_shape, img)
background[y0:y0 + img_shape[0], x0:x0 + img_shape[1]] = img
return background
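# Usage sketch (illustration only; canvas size and fill value are arbitrary choices): center an
# image smaller than 256x256 on a constant grey canvas.
def _demo_center_paste(img):
    aug = CenterPaste(background_shape=(256, 256),
                      background_filler=ConstantBackgroundFiller(128))
    return aug.get_transform(img).apply_image(img)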
| 3,372 | 29.387387 | 102 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/deform.py | # -*- coding: utf-8 -*-
# File: deform.py
import numpy as np
from ...utils import logger
from .base import ImageAugmentor
from .transform import TransformFactory
__all__ = []
# Code was temporarily kept here for a future reference in case someone needs it
# But it was already deprecated,
# because this augmentation is not a general one that people will often find helpful.
class GaussianMap(object):
""" Generate Gaussian weighted deformation map"""
# TODO really needs speedup
def __init__(self, image_shape, sigma=0.5):
assert len(image_shape) == 2
self.shape = image_shape
self.sigma = sigma
def get_gaussian_weight(self, anchor):
"""
Args:
anchor: coordinate of the center
"""
ret = np.zeros(self.shape, dtype='float32')
y, x = np.mgrid[:self.shape[0], :self.shape[1]]
y = y.astype('float32') / ret.shape[0] - anchor[0]
x = x.astype('float32') / ret.shape[1] - anchor[1]
g = np.exp(-(x**2 + y ** 2) / self.sigma)
# cv2.imshow(" ", g)
# cv2.waitKey()
return g
def np_sample(img, coords):
# a numpy implementation of ImageSample layer
coords = np.maximum(coords, 0)
coords = np.minimum(coords, np.array([img.shape[0] - 1, img.shape[1] - 1]))
lcoor = np.floor(coords).astype('int32')
ucoor = lcoor + 1
ucoor = np.minimum(ucoor, np.array([img.shape[0] - 1, img.shape[1] - 1]))
diff = coords - lcoor
neg_diff = 1.0 - diff
lcoory, lcoorx = np.split(lcoor, 2, axis=2)
ucoory, ucoorx = np.split(ucoor, 2, axis=2)
diff = np.repeat(diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
neg_diff = np.repeat(neg_diff, 3, 2).reshape((diff.shape[0], diff.shape[1], 2, 3))
diffy, diffx = np.split(diff, 2, axis=2)
ndiffy, ndiffx = np.split(neg_diff, 2, axis=2)
ret = img[lcoory, lcoorx, :] * ndiffx * ndiffy + \
img[ucoory, ucoorx, :] * diffx * diffy + \
img[lcoory, ucoorx, :] * ndiffy * diffx + \
img[ucoory, lcoorx, :] * diffy * ndiffx
return ret[:, :, 0, :]
class GaussianDeform(ImageAugmentor):
"""
Some kind of slow deformation I made up. Don't count on it.
"""
# TODO input/output with different shape
def __init__(self, anchors, shape, sigma=0.5, randrange=None):
"""
Args:
anchors (list): list of center coordinates in range [0,1].
shape(list or tuple): image shape in [h, w].
sigma (float): sigma for Gaussian weight
randrange (int): offset range. Defaults to shape[0] / 8
"""
logger.warn("GaussianDeform is slow. Consider using it with 4 or more prefetching processes.")
super(GaussianDeform, self).__init__()
self.anchors = anchors
self.K = len(self.anchors)
self.shape = shape
self.grid = np.mgrid[0:self.shape[0], 0:self.shape[1]].transpose(1, 2, 0)
self.grid = self.grid.astype('float32') # HxWx2
gm = GaussianMap(self.shape, sigma=sigma)
self.gws = np.array([gm.get_gaussian_weight(ank)
for ank in self.anchors], dtype='float32') # KxHxW
self.gws = self.gws.transpose(1, 2, 0) # HxWxK
if randrange is None:
self.randrange = self.shape[0] / 8
else:
self.randrange = randrange
self.sigma = sigma
def get_transform(self, img):
v = self.rng.rand(self.K, 2).astype('float32') - 0.5
v = v * 2 * self.randrange
return TransformFactory(name=str(self), apply_image=lambda img: self._augment(img, v))
def _augment(self, img, v):
grid = self.grid + np.dot(self.gws, v)
return np_sample(img, grid)
| 3,748 | 33.394495 | 102 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/geometry.py | # -*- coding: utf-8 -*-
# File: geometry.py
import math
import numpy as np
import cv2
from .base import ImageAugmentor
from .transform import WarpAffineTransform, CropTransform, TransformList
__all__ = ['Shift', 'Rotation', 'RotationAndCropValid', 'Affine']
class Shift(ImageAugmentor):
""" Random horizontal and vertical shifts """
def __init__(self, horiz_frac=0, vert_frac=0,
border=cv2.BORDER_REPLICATE, border_value=0):
"""
Args:
horiz_frac (float): max abs fraction for horizontal shift
            vert_frac (float): max abs fraction for vertical shift
border: cv2 border method
border_value: cv2 border value for border=cv2.BORDER_CONSTANT
"""
assert horiz_frac < 1.0 and vert_frac < 1.0
super(Shift, self).__init__()
self._init(locals())
def get_transform(self, img):
max_dx = self.horiz_frac * img.shape[1]
max_dy = self.vert_frac * img.shape[0]
dx = np.round(self._rand_range(-max_dx, max_dx))
dy = np.round(self._rand_range(-max_dy, max_dy))
mat = np.array([[1, 0, dx], [0, 1, dy]], dtype='float32')
return WarpAffineTransform(
mat, img.shape[1::-1],
borderMode=self.border, borderValue=self.border_value)
class Rotation(ImageAugmentor):
""" Random rotate the image w.r.t a random center"""
def __init__(self, max_deg, center_range=(0, 1),
interp=cv2.INTER_LINEAR,
border=cv2.BORDER_REPLICATE, step_deg=None, border_value=0):
"""
Args:
max_deg (float): max abs value of the rotation angle (in degree).
center_range (tuple): (min, max) range of the random rotation center.
interp: cv2 interpolation method
border: cv2 border method
step_deg (float): if not None, the stepping of the rotation
angle. The rotation angle will be a multiple of step_deg. This
                option requires ``max_deg==180`` and step_deg has to be a divisor of 180.
border_value: cv2 border value for border=cv2.BORDER_CONSTANT
"""
assert step_deg is None or (max_deg == 180 and max_deg % step_deg == 0)
super(Rotation, self).__init__()
self._init(locals())
def get_transform(self, img):
center = img.shape[1::-1] * self._rand_range(
self.center_range[0], self.center_range[1], (2,))
deg = self._rand_range(-self.max_deg, self.max_deg)
if self.step_deg:
deg = deg // self.step_deg * self.step_deg
"""
The correct center is shape*0.5-0.5. This can be verified by:
SHAPE = 7
arr = np.random.rand(SHAPE, SHAPE)
orig = arr
c = SHAPE * 0.5 - 0.5
c = (c, c)
for k in range(4):
mat = cv2.getRotationMatrix2D(c, 90, 1)
arr = cv2.warpAffine(arr, mat, arr.shape)
assert np.all(arr == orig)
"""
mat = cv2.getRotationMatrix2D(tuple(center - 0.5), deg, 1)
return WarpAffineTransform(
mat, img.shape[1::-1], interp=self.interp,
borderMode=self.border, borderValue=self.border_value)
class RotationAndCropValid(ImageAugmentor):
""" Random rotate and then crop the largest possible rectangle.
Note that this will produce images of different shapes.
"""
def __init__(self, max_deg, interp=cv2.INTER_LINEAR, step_deg=None):
"""
Args:
max_deg, interp, step_deg: same as :class:`Rotation`
"""
assert step_deg is None or (max_deg == 180 and max_deg % step_deg == 0)
super(RotationAndCropValid, self).__init__()
self._init(locals())
def _get_deg(self, img):
deg = self._rand_range(-self.max_deg, self.max_deg)
if self.step_deg:
deg = deg // self.step_deg * self.step_deg
return deg
def get_transform(self, img):
deg = self._get_deg(img)
h, w = img.shape[:2]
center = (img.shape[1] * 0.5, img.shape[0] * 0.5)
rot_m = cv2.getRotationMatrix2D((center[0] - 0.5, center[1] - 0.5), deg, 1)
tfm = WarpAffineTransform(rot_m, (w, h), interp=self.interp)
neww, newh = RotationAndCropValid.largest_rotated_rect(w, h, deg)
neww = min(neww, w)
newh = min(newh, h)
newx = int(center[0] - neww * 0.5)
newy = int(center[1] - newh * 0.5)
tfm2 = CropTransform(newy, newx, newh, neww)
return TransformList([tfm, tfm2])
@staticmethod
def largest_rotated_rect(w, h, angle):
"""
Get largest rectangle after rotation.
http://stackoverflow.com/questions/16702966/rotate-image-and-crop-out-black-borders
"""
angle = angle / 180.0 * math.pi
if w <= 0 or h <= 0:
return 0, 0
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angle and 180-angle are all the same,
        # it suffices to look at the first quadrant and the absolute values of sin,cos:
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
if side_short <= 2. * sin_a * cos_a * side_long:
# half constrained case: two crop corners touch the longer side,
# the other two corners are on the mid-line parallel to the longer line
x = 0.5 * side_short
wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
else:
# fully constrained case: crop touches all 4 sides
cos_2a = cos_a * cos_a - sin_a * sin_a
wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a
return int(np.round(wr)), int(np.round(hr))
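# Worked example (illustration only): a 100x100 square rotated by 45 degrees hits the
# "half constrained" branch above (side_short == 2*sin*cos*side_long), so the largest valid
# crop is 50/sin(45deg) ~= 70.7 pixels per side, i.e.
# RotationAndCropValid.largest_rotated_rect(100, 100, 45) == (71, 71).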
class Affine(ImageAugmentor):
"""
Random affine transform of the image w.r.t to the image center.
Transformations involve:
- Translation ("move" image on the x-/y-axis)
- Rotation
- Scaling ("zoom" in/out)
- Shear (move one side of the image, turning a square into a trapezoid)
"""
def __init__(self, scale=None, translate_frac=None, rotate_max_deg=0.0, shear=0.0,
interp=cv2.INTER_LINEAR, border=cv2.BORDER_REPLICATE, border_value=0):
"""
Args:
scale (tuple of 2 floats): scaling factor interval, e.g (a, b), then scale is
randomly sampled from the range a <= scale <= b. Will keep
original scale by default.
translate_frac (tuple of 2 floats): tuple of max abs fraction for horizontal
and vertical translation. For example translate_frac=(a, b), then horizontal shift
is randomly sampled in the range 0 < dx < img_width * a and vertical shift is
randomly sampled in the range 0 < dy < img_height * b. Will
not translate by default.
            shear (float): max abs shear value in degrees between 0 and 180
interp: cv2 interpolation method
border: cv2 border method
border_value: cv2 border value for border=cv2.BORDER_CONSTANT
"""
if scale is not None:
assert isinstance(scale, tuple) and len(scale) == 2, \
"Argument scale should be a tuple of two floats, e.g (a, b)"
if translate_frac is not None:
assert isinstance(translate_frac, tuple) and len(translate_frac) == 2, \
"Argument translate_frac should be a tuple of two floats, e.g (a, b)"
assert shear >= 0.0, "Argument shear should be between 0.0 and 180.0"
super(Affine, self).__init__()
self._init(locals())
def get_transform(self, img):
if self.scale is not None:
scale = self._rand_range(self.scale[0], self.scale[1])
else:
scale = 1.0
if self.translate_frac is not None:
max_dx = self.translate_frac[0] * img.shape[1]
max_dy = self.translate_frac[1] * img.shape[0]
dx = np.round(self._rand_range(-max_dx, max_dx))
dy = np.round(self._rand_range(-max_dy, max_dy))
else:
dx = 0
dy = 0
if self.shear > 0.0:
shear = self._rand_range(-self.shear, self.shear)
sin_shear = math.sin(math.radians(shear))
cos_shear = math.cos(math.radians(shear))
else:
sin_shear = 0.0
cos_shear = 1.0
center = (img.shape[1::-1] * np.array((0.5, 0.5))) - 0.5
deg = self._rand_range(-self.rotate_max_deg, self.rotate_max_deg)
transform_matrix = cv2.getRotationMatrix2D(tuple(center), deg, scale)
# Apply shear :
if self.shear > 0.0:
m00 = transform_matrix[0, 0]
m01 = transform_matrix[0, 1]
m10 = transform_matrix[1, 0]
m11 = transform_matrix[1, 1]
transform_matrix[0, 1] = m01 * cos_shear + m00 * sin_shear
transform_matrix[1, 1] = m11 * cos_shear + m10 * sin_shear
# Add correction term to keep the center unchanged
tx = center[0] * (1.0 - m00) - center[1] * transform_matrix[0, 1]
ty = -center[0] * m10 + center[1] * (1.0 - transform_matrix[1, 1])
transform_matrix[0, 2] = tx
transform_matrix[1, 2] = ty
# Apply shift :
transform_matrix[0, 2] += dx
transform_matrix[1, 2] += dy
return WarpAffineTransform(transform_matrix, img.shape[1::-1],
self.interp, self.border, self.border_value)
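# Usage sketch (illustration only; parameter values are arbitrary): a mild affine jitter that
# combines all four components described in the Affine docstring.
def _demo_affine(img):
    aug = Affine(scale=(0.9, 1.1), translate_frac=(0.05, 0.05), rotate_max_deg=10.0,
                 shear=5.0, border=cv2.BORDER_CONSTANT, border_value=0)
    return aug.get_transform(img).apply_image(img)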
| 9,653 | 39.058091 | 98 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/imgproc.py | # -*- coding: utf-8 -*-
# File: imgproc.py
import numpy as np
import cv2
from ...utils.develop import log_deprecated
from .base import PhotometricAugmentor
__all__ = ['Hue', 'Brightness', 'BrightnessScale', 'Contrast', 'MeanVarianceNormalize',
'GaussianBlur', 'Gamma', 'Clip', 'Saturation', 'Lighting', 'MinMaxNormalize']
class Hue(PhotometricAugmentor):
""" Randomly change color hue.
"""
def __init__(self, range=(0, 180), rgb=True):
"""
Args:
range(list or tuple): range from which the applied hue offset is selected
(maximum range can be [-90,90] for both uint8 and float32)
rgb (bool): whether input is RGB or BGR.
"""
super(Hue, self).__init__()
rgb = bool(rgb)
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, hue):
m = cv2.COLOR_BGR2HSV if not self.rgb else cv2.COLOR_RGB2HSV
hsv = cv2.cvtColor(img, m)
# https://docs.opencv.org/3.2.0/de/d25/imgproc_color_conversions.html#color_convert_rgb_hsv
if hsv.dtype.itemsize == 1:
# OpenCV uses 0-179 for 8-bit images
hsv[..., 0] = (hsv[..., 0] + hue) % 180
else:
# OpenCV uses 0-360 for floating point images
hsv[..., 0] = (hsv[..., 0] + 2 * hue) % 360
m = cv2.COLOR_HSV2BGR if not self.rgb else cv2.COLOR_HSV2RGB
img = cv2.cvtColor(hsv, m)
return img
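# Usage sketch (illustration only; the range below is an arbitrary choice): the offset is added
# in OpenCV's HSV hue channel, which is 0-179 for uint8 images and 0-360 for float images, so
# the same `range` argument covers both dtypes.
def _demo_hue(img):
    aug = Hue(range=(-18, 18), rgb=True)  # roughly +/-10% of the hue circle
    return aug.get_transform(img).apply_image(img)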
class Brightness(PhotometricAugmentor):
"""
Adjust brightness by adding a random number.
"""
def __init__(self, delta, clip=True):
"""
Args:
delta (float): Randomly add a value within [-delta,delta]
clip (bool): clip results to [0,255] even when data type is not uint8.
"""
super(Brightness, self).__init__()
assert delta > 0
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(-self.delta, self.delta)
def _augment(self, img, v):
old_dtype = img.dtype
img = img.astype('float32')
img += v
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class BrightnessScale(PhotometricAugmentor):
"""
Adjust brightness by scaling by a random factor.
"""
def __init__(self, range, clip=True):
"""
Args:
range (tuple): Randomly scale the image by a factor in (range[0], range[1])
clip (bool): clip results to [0,255] even when data type is not uint8.
"""
super(BrightnessScale, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, v):
old_dtype = img.dtype
img = img.astype('float32')
img *= v
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class Contrast(PhotometricAugmentor):
"""
Apply ``x = (x - mean) * contrast_factor + mean`` to each channel.
"""
def __init__(self, factor_range, rgb=None, clip=True):
"""
Args:
factor_range (list or tuple): an interval to randomly sample the `contrast_factor`.
            rgb (bool or None): if None, use the per-channel mean. Otherwise, use the mean of
                a grayscale version of the image (True means the input is RGB, False means BGR).
clip (bool): clip to [0, 255] even when data type is not uint8.
"""
super(Contrast, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.factor_range)
def _augment(self, img, r):
old_dtype = img.dtype
if img.ndim == 3:
if self.rgb is not None:
m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
grey = cv2.cvtColor(img.astype('float32'), m)
mean = np.mean(grey)
else:
mean = np.mean(img, axis=(0, 1), keepdims=True)
else:
mean = np.mean(img)
img = img * r + mean * (1 - r)
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
class MeanVarianceNormalize(PhotometricAugmentor):
"""
    Linearly scales the image to have zero mean and unit variance.
``x = (x - mean) / adjusted_stddev``
where ``adjusted_stddev = max(stddev, 1.0/sqrt(num_pixels * channels))``
This augmentor always returns float32 images.
"""
def __init__(self, all_channel=True):
"""
Args:
all_channel (bool): if True, normalize all channels together. else separately.
"""
self._init(locals())
def _augment(self, img, _):
img = img.astype('float32')
if self.all_channel:
mean = np.mean(img)
std = np.std(img)
else:
mean = np.mean(img, axis=(0, 1), keepdims=True)
std = np.std(img, axis=(0, 1), keepdims=True)
std = np.maximum(std, 1.0 / np.sqrt(np.prod(img.shape)))
img = (img - mean) / std
return img
class GaussianBlur(PhotometricAugmentor):
""" Gaussian blur the image with random window size"""
def __init__(self, size_range=(0, 3), sigma_range=(0, 0), symmetric=True, max_size=None):
"""
Args:
size_range (tuple[int]): Gaussian window size would be 2 * size +
1, where size is randomly sampled from this [low, high) range.
sigma_range (tuple[float]): min,max of the sigma value. 0 means
opencv's default.
symmetric (bool): whether to use the same size & sigma for x and y.
max_size (int): deprecated
"""
super(GaussianBlur, self).__init__()
if not isinstance(size_range, (list, tuple)):
size_range = (0, size_range)
assert isinstance(sigma_range, (list, tuple)), sigma_range
if max_size is not None:
log_deprecated("GaussianBlur(max_size=)", "Use size_range= instead!", "2020-09-01")
size_range = (0, max_size)
self._init(locals())
def _get_augment_params(self, _):
size_xy = self.rng.randint(self.size_range[0], self.size_range[1], size=(2,)) * 2 + 1
sigma_xy = self._rand_range(*self.sigma_range, size=(2,))
if self.symmetric:
size_xy[1] = size_xy[0]
sigma_xy[1] = sigma_xy[0]
return tuple(size_xy), tuple(sigma_xy)
def _augment(self, img, prm):
size, sigma = prm
return np.reshape(cv2.GaussianBlur(img, size, sigmaX=sigma[0], sigmaY=sigma[1],
borderType=cv2.BORDER_REPLICATE), img.shape)
class Gamma(PhotometricAugmentor):
""" Randomly adjust gamma """
def __init__(self, range=(-0.5, 0.5)):
"""
Args:
range(list or tuple): gamma range
"""
super(Gamma, self).__init__()
self._init(locals())
def _get_augment_params(self, _):
return self._rand_range(*self.range)
def _augment(self, img, gamma):
old_dtype = img.dtype
lut = ((np.arange(256, dtype='float32') / 255) ** (1. / (1. + gamma)) * 255).astype('uint8')
img = np.clip(img, 0, 255).astype('uint8')
ret = cv2.LUT(img, lut).astype(old_dtype)
if img.ndim == 3 and ret.ndim == 2:
ret = ret[:, :, np.newaxis]
return ret
class Clip(PhotometricAugmentor):
""" Clip the pixel values """
def __init__(self, min=0, max=255):
"""
Args:
min, max: the clip range
"""
self._init(locals())
def _augment(self, img, _):
return np.clip(img, self.min, self.max)
class Saturation(PhotometricAugmentor):
""" Randomly adjust saturation.
Follows the implementation in `fb.resnet.torch
<https://github.com/facebook/fb.resnet.torch/blob/master/datasets/transforms.lua#L218>`__.
"""
def __init__(self, alpha=0.4, rgb=True, clip=True):
"""
Args:
alpha(float): maximum saturation change.
rgb (bool): whether input is RGB or BGR.
clip (bool): clip results to [0,255] even when data type is not uint8.
"""
super().__init__()
rgb = bool(rgb)
assert alpha < 1
self._init(locals())
def _get_augment_params(self, _):
return 1 + self._rand_range(-self.alpha, self.alpha)
def _augment(self, img, v):
old_dtype = img.dtype
m = cv2.COLOR_RGB2GRAY if self.rgb else cv2.COLOR_BGR2GRAY
grey = cv2.cvtColor(img, m)
ret = img * v + (grey * (1 - v))[:, :, np.newaxis]
if self.clip or old_dtype == np.uint8:
ret = np.clip(ret, 0, 255)
return ret.astype(old_dtype)
class Lighting(PhotometricAugmentor):
""" Lighting noise, as in the paper
`ImageNet Classification with Deep Convolutional Neural Networks
<https://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks.pdf>`_.
The implementation follows `fb.resnet.torch
<https://github.com/facebook/fb.resnet.torch/blob/master/datasets/transforms.lua#L184>`__.
"""
def __init__(self, std, eigval, eigvec, clip=True):
"""
Args:
std (float): maximum standard deviation
eigval: a vector of (3,). The eigenvalues of 3 channels.
eigvec: a 3x3 matrix. Each column is one eigen vector.
clip (bool): clip results to [0,255] even when data type is not uint8.
"""
super(Lighting, self).__init__()
eigval = np.asarray(eigval, dtype="float32")
eigvec = np.asarray(eigvec, dtype="float32")
assert eigval.shape == (3,)
assert eigvec.shape == (3, 3)
self._init(locals())
def _get_augment_params(self, img):
assert img.shape[2] == 3
return (self.rng.randn(3) * self.std).astype("float32")
def _augment(self, img, v):
old_dtype = img.dtype
v = v * self.eigval
v = v.reshape((3, 1))
inc = np.dot(self.eigvec, v).reshape((3,))
img = np.add(img, inc)
if self.clip or old_dtype == np.uint8:
img = np.clip(img, 0, 255)
return img.astype(old_dtype)
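# Usage sketch (illustration only): the eigenvalues/eigenvectors below are the commonly quoted
# ImageNet PCA statistics popularized by fb.resnet.torch; treat them as an assumption and check
# the channel order (RGB vs BGR) and the row/column convention against your own data.
def _demo_lighting():
    eigval = np.asarray([0.2175, 0.0188, 0.0045], dtype='float32') * 255.0
    eigvec = np.asarray([[-0.5675, 0.7192, 0.4009],
                         [-0.5808, -0.0045, -0.8140],
                         [-0.5836, -0.6948, 0.4203]], dtype='float32')
    return Lighting(std=0.1, eigval=eigval, eigvec=eigvec)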
class MinMaxNormalize(PhotometricAugmentor):
"""
Linearly scales the image to the range [min, max].
This augmentor always returns float32 images.
"""
def __init__(self, min=0, max=255, all_channel=True):
"""
Args:
max (float): The new maximum value
min (float): The new minimum value
all_channel (bool): if True, normalize all channels together. else separately.
"""
self._init(locals())
def _augment(self, img, _):
img = img.astype('float32')
if self.all_channel:
minimum = np.min(img)
maximum = np.max(img)
else:
minimum = np.min(img, axis=(0, 1), keepdims=True)
maximum = np.max(img, axis=(0, 1), keepdims=True)
img = (self.max - self.min) * (img - minimum) / (maximum - minimum) + self.min
return img
| 11,285 | 32.993976 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/convert.py | # -*- coding: utf-8 -*-
# File: convert.py
import numpy as np
import cv2
from .base import PhotometricAugmentor
__all__ = ['ColorSpace', 'Grayscale', 'ToUint8', 'ToFloat32']
class ColorSpace(PhotometricAugmentor):
""" Convert into another color space. """
def __init__(self, mode, keepdims=True):
"""
Args:
mode: OpenCV color space conversion code (e.g., ``cv2.COLOR_BGR2HSV``)
keepdims (bool): keep the dimension of image unchanged if OpenCV
changes it.
"""
super(ColorSpace, self).__init__()
self._init(locals())
def _augment(self, img, _):
transf = cv2.cvtColor(img, self.mode)
if self.keepdims:
            if len(transf.shape) != len(img.shape):
transf = transf[..., None]
return transf
class Grayscale(ColorSpace):
""" Convert RGB or BGR image to grayscale. """
def __init__(self, keepdims=True, rgb=False, keepshape=False):
"""
Args:
keepdims (bool): return image of shape [H, W, 1] instead of [H, W]
rgb (bool): interpret input as RGB instead of the default BGR
keepshape (bool): whether to duplicate the gray image into 3 channels
so the result has the same shape as input.
"""
mode = cv2.COLOR_RGB2GRAY if rgb else cv2.COLOR_BGR2GRAY
if keepshape:
assert keepdims, "keepdims must be True when keepshape==True"
super(Grayscale, self).__init__(mode, keepdims)
self.keepshape = keepshape
self.rgb = rgb
def _augment(self, img, _):
ret = super()._augment(img, _)
if self.keepshape:
return np.concatenate([ret] * 3, axis=2)
else:
return ret
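# Shape sketch (illustration only): for an HxWx3 input, Grayscale() returns HxWx1,
# Grayscale(keepdims=False) returns HxW, and Grayscale(keepshape=True) returns HxWx3 again,
# with the single grey channel replicated three times.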
class ToUint8(PhotometricAugmentor):
""" Clip and convert image to uint8. Useful to reduce communication overhead. """
def _augment(self, img, _):
return np.clip(img, 0, 255).astype(np.uint8)
class ToFloat32(PhotometricAugmentor):
""" Convert image to float32, may increase quality of the augmentor. """
def _augment(self, img, _):
return img.astype(np.float32)
| 2,180 | 30.608696 | 85 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .base import *
from .convert import *
from .crop import *
from .deform import *
from .geometry import *
from .imgproc import *
from .meta import *
from .misc import *
from .noise import *
from .paste import *
from .transform import *
from .external import *
import os
from pkgutil import iter_modules
__all__ = []
def global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else dir(p)
if lst:
del globals()[name]
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
try:
import cv2 # noqa
except ImportError:
from ...utils import logger
logger.warn("Cannot import 'cv2', therefore image augmentation is not available.")
else:
_CURR_DIR = os.path.dirname(__file__)
for _, module_name, _ in iter_modules(
[os.path.dirname(__file__)]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if not module_name.startswith('_') and "_test" not in module_name:
global_import(module_name)
| 1,463 | 26.622642 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/dataflow/imgaug/meta.py | # -*- coding: utf-8 -*-
# File: meta.py
from .base import ImageAugmentor
from .transform import NoOpTransform, TransformList, TransformFactory
__all__ = ['RandomChooseAug', 'MapImage', 'Identity', 'RandomApplyAug',
'RandomOrderAug']
class Identity(ImageAugmentor):
""" A no-op augmentor """
def get_transform(self, img):
return NoOpTransform()
class RandomApplyAug(ImageAugmentor):
""" Randomly apply the augmentor with a probability.
Otherwise do nothing
"""
def __init__(self, aug, prob):
"""
Args:
aug (ImageAugmentor): an augmentor.
prob (float): the probability to apply the augmentor.
"""
self._init(locals())
super(RandomApplyAug, self).__init__()
def get_transform(self, img):
p = self.rng.rand()
if p < self.prob:
return self.aug.get_transform(img)
else:
return NoOpTransform()
def reset_state(self):
super(RandomApplyAug, self).reset_state()
self.aug.reset_state()
class RandomChooseAug(ImageAugmentor):
""" Randomly choose one from a list of augmentors """
def __init__(self, aug_lists):
"""
Args:
aug_lists (list): list of augmentors, or list of (augmentor, probability) tuples
"""
if isinstance(aug_lists[0], (tuple, list)):
prob = [k[1] for k in aug_lists]
aug_lists = [k[0] for k in aug_lists]
self._init(locals())
else:
prob = [1.0 / len(aug_lists)] * len(aug_lists)
self._init(locals())
super(RandomChooseAug, self).__init__()
def reset_state(self):
super(RandomChooseAug, self).reset_state()
for a in self.aug_lists:
a.reset_state()
def get_transform(self, img):
aug_idx = self.rng.choice(len(self.aug_lists), p=self.prob)
return self.aug_lists[aug_idx].get_transform(img)
class RandomOrderAug(ImageAugmentor):
"""
Apply the augmentors with randomized order.
"""
def __init__(self, aug_lists):
"""
Args:
aug_lists (list): list of augmentors.
The augmentors are assumed to not change the shape of images.
"""
self._init(locals())
super(RandomOrderAug, self).__init__()
def reset_state(self):
super(RandomOrderAug, self).reset_state()
for a in self.aug_lists:
a.reset_state()
def get_transform(self, img):
# Note: this makes assumption that the augmentors do not make changes
# to the image that will affect how the transforms will be instantiated
# in the subsequent augmentors.
idxs = self.rng.permutation(len(self.aug_lists))
tfms = [self.aug_lists[k].get_transform(img)
for k in range(len(self.aug_lists))]
return TransformList([tfms[k] for k in idxs])
class MapImage(ImageAugmentor):
"""
Map the image array by simple functions.
"""
def __init__(self, func, coord_func=None):
"""
Args:
func: a function which takes an image array and return an augmented one
coord_func: optional. A function which takes coordinates and return augmented ones.
Coordinates should be Nx2 array of (x, y)s.
"""
super(MapImage, self).__init__()
self.func = func
self.coord_func = coord_func
def get_transform(self, img):
if self.coord_func:
return TransformFactory(name="MapImage", apply_image=self.func, apply_coords=self.coord_func)
else:
return TransformFactory(name="MapImage", apply_image=self.func)
| 3,722 | 30.025 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/scope_utils.py | # -*- coding: utf-8 -*-
# File: scope_utils.py
import functools
from contextlib import contextmanager
from ..compat import tfv1 as tf
from ..utils.argtools import graph_memoized
from ..utils import logger
from .common import get_tf_version_tuple
__all__ = ['auto_reuse_variable_scope', 'cached_name_scope', 'under_name_scope']
def auto_reuse_variable_scope(func):
"""
A decorator which automatically reuses the current variable scope if the
function has been called with the same variable scope before.
Example:
.. code-block:: python
@auto_reuse_variable_scope
def myfunc(x):
return tf.layers.conv2d(x, 128, 3)
myfunc(x1) # will inherit parent scope reuse
myfunc(x2) # will reuse
with tf.variable_scope('newscope'):
myfunc(x3) # will inherit parent scope reuse
myfunc(x4) # will reuse
"""
used_scope = set()
@functools.wraps(func)
def wrapper(*args, **kwargs):
scope = tf.get_variable_scope()
h = hash((tf.get_default_graph(), scope.name))
# print("Entering " + scope.name + " reuse: " + str(h in used_scope))
if h in used_scope:
if get_tf_version_tuple() >= (1, 5):
with tf.variable_scope(scope, reuse=True, auxiliary_name_scope=False):
return func(*args, **kwargs)
else:
ns = tf.get_default_graph().get_name_scope()
with tf.variable_scope(scope, reuse=True), \
tf.name_scope(ns + '/' if ns else ''):
return func(*args, **kwargs)
else:
used_scope.add(h)
return func(*args, **kwargs)
return wrapper
def under_name_scope(name_scope=None):
"""
Args:
name_scope(str): the default scope to use. If None, will use the name of the function.
Returns:
A decorator which makes the function run under a name scope.
The name scope is obtained by the following:
1. The 'name_scope' keyword argument when the decorated function is called.
2. The 'name_scope' argument of the decorator.
3. (default) The name of the decorated function itself.
If the name is taken and cannot be used, a warning will be
printed in the first case.
Example:
.. code-block:: python
@under_name_scope()
def rms(x):
return tf.sqrt(
tf.reduce_mean(tf.square(x)))
rms(tensor) # will be called under name scope 'rms'
rms(tensor, name_scope='scope') # will be called under name scope 'scope'
Todo:
Add a reuse option.
"""
def _impl(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
warn_incorrect_scope = 'name_scope' in kwargs
scopename = kwargs.pop('name_scope', name_scope)
if scopename is None:
scopename = func.__name__
if warn_incorrect_scope:
# cached_name_scope will try to reenter the existing scope
with cached_name_scope(scopename, top_level=False) as scope:
scope = scope.strip('/')
# but it can still conflict with an existing tensor
if not scope.endswith(scopename):
logger.warn(""" \
Calling function {} with name_scope='{}', but actual name scope becomes '{}'. \
The name '{}' might be taken.""".format(func.__name__, scopename, scope.split('/')[-1], scopename))
return func(*args, **kwargs)
else:
with tf.name_scope(scopename):
return func(*args, **kwargs)
return wrapper
return _impl
def under_variable_scope():
"""
Returns:
A decorator which makes the function happen under a variable scope,
which is named by the function itself.
Example:
.. code-block:: python
@under_variable_scope()
def mid_level(x):
with argscope(Conv2D, kernel_shape=3, nl=BNReLU):
x = Conv2D('conv1', x, 512, stride=1)
x = Conv2D('conv2', x, 256, stride=1)
return x
"""
def _impl(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
name = func.__name__
with tf.variable_scope(name):
return func(*args, **kwargs)
return wrapper
return _impl
@graph_memoized
def _get_cached_ns(name):
with tf.name_scope(None):
with tf.name_scope(name) as scope:
return scope
@contextmanager
def cached_name_scope(name, top_level=True):
"""
Return a context which either opens and caches a new name scope,
or reenter an existing one.
Args:
top_level(bool): if True, the name scope will always be top-level.
It will not be nested under any existing name scope of the caller.
"""
if not top_level:
current_ns = tf.get_default_graph().get_name_scope()
if current_ns:
name = current_ns + '/' + name
ns = _get_cached_ns(name)
with tf.name_scope(ns):
yield ns
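# Usage sketch (illustration only; scope and tensor names are made up): two calls with the same
# `name` re-enter one cached scope instead of creating "my_metrics_1", "my_metrics_2", ...
def _demo_cached_name_scope():
    with cached_name_scope("my_metrics"):
        a = tf.constant(1, name="counter_a")
    with cached_name_scope("my_metrics"):
        b = tf.constant(2, name="counter_b")
    return a.name, b.name  # both live under the single "my_metrics/" name scope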
| 5,196 | 29.934524 | 99 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/gradproc.py | # -*- coding: utf-8 -*-
# File: gradproc.py
import inspect
import re
from abc import ABCMeta, abstractmethod
import six
import tensorflow as tf
from ..compat import tfv1
from ..utils import logger
from .summary import add_moving_summary
from .symbolic_functions import print_stat, rms
__all__ = ['GradientProcessor',
'FilterNoneGrad', 'GlobalNormClip', 'MapGradient', 'SummaryGradient',
'PrintGradient', 'CheckGradient', 'ScaleGradient']
@six.add_metaclass(ABCMeta)
class GradientProcessor(object):
"""
Base class for all gradient processors.
Gradient processors can be applied to optimizers by
:func:`optimizer.apply_grad_processors`.
Subclass should override the ``_process()`` method.
"""
_name_scope = None
def process(self, grads):
"""
Process the symbolic gradients.
Args:
grads (list): list of (grad, var).
Returns:
list: processed gradients, with the same type as input.
"""
# reuse the old name_scope, if process() is called multiple times
if self._name_scope is None:
with tfv1.name_scope(type(self).__name__) as scope:
self._name_scope = scope
return self._process(grads)
else:
with tfv1.name_scope(self._name_scope):
return self._process(grads)
@abstractmethod
def _process(self, grads):
pass
class FilterNoneGrad(GradientProcessor):
"""
Skip the update and print a warning (instead of crashing),
when the gradient of certain variable is None.
"""
def __init__(self, verbose=True):
"""
Args:
verbose (bool): whether to print warning about None gradients.
"""
super(FilterNoneGrad, self).__init__()
self._verbose = verbose
def _process(self, grads):
g = []
to_print = []
for grad, var in grads:
if grad is None:
to_print.append(var.op.name)
else:
g.append((grad, var))
if self._verbose and len(to_print):
message = ', '.join(to_print)
logger.warn("No gradient w.r.t {} trainable variables: {}".format(len(to_print), message))
return g
class GlobalNormClip(GradientProcessor):
""" Clip by global norm.
The global norm is the sum of norm for **all** gradients.
See :func:`tf.clip_by_global_norm` for more information.
"""
def __init__(self, global_norm):
"""
Args:
global_norm(float): the threshold to clip with.
"""
super(GlobalNormClip, self).__init__()
self._norm = float(global_norm)
def _process(self, grads):
g = [k[0] for k in grads]
v = [k[1] for k in grads]
g, _ = tf.clip_by_global_norm(g, self._norm, name='clip_by_global_norm')
return list(zip(g, v))
class MapGradient(GradientProcessor):
"""
Apply a function on all gradient if the name matches regex.
Keep the other gradients unchanged.
It can be used for gradient clipping, etc.
"""
def __init__(self, func, regex='.*'):
"""
Args:
func: a user-supplied function which takes one or two arguments.
The argument(s) can be either a `grad` tensor, or `grad` and `var`.
The function should return the new gradient to be used.
If it return None, the gradient is discarded (hence no update to the variable will happen).
regex (str): used to match variables. Defaults to match all variables.
"""
args = inspect.getfullargspec(func).args
arg_num = len(args) - inspect.ismethod(func)
assert arg_num in [1, 2], \
"The function must take 1 or 2 arguments! ({})".format(args)
if arg_num == 1:
self.func = lambda grad, var: func(grad)
else:
self.func = func
if not regex.endswith('$'):
regex = regex + '$'
self.regex = regex
super(MapGradient, self).__init__()
def _process(self, grads):
ret = []
matched = False
for grad, var in grads:
if re.match(self.regex, var.op.name):
matched = True
grad = self.func(grad, var)
if grad is not None:
ret.append((grad, var))
else:
ret.append((grad, var))
if not matched:
logger.warn("[MapGradient] No match was found for regex {}.".format(self.regex))
return ret
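# Usage sketch (illustration only; the clipping threshold is arbitrary): elementwise gradient
# clipping with MapGradient, attached to an optimizer through the same `apply_grad_processors`
# pattern shown in the ScaleGradient docstring further below.
def _demo_clip_gradients(opt):
    from .optimizer import apply_grad_processors  # tensorpack.tfutils.optimizer
    return apply_grad_processors(
        opt, [MapGradient(lambda grad: tf.clip_by_value(grad, -0.1, 0.1))])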
# TODO has dependency problems: sess.run may not depend on grad
# maybe group maintain op and grad ?
class SummaryGradient(MapGradient):
"""
For each gradient tensor, summary its histogram and add it to moving
summaries.
"""
# avoid duplicate summaries from towers
# TODO this is global. not good.
_summaried_gradient = set()
def __init__(self, regex='.*', collections=None):
"""
Args:
regex(str): same as in :class:`MapGradient`.
collections (list[str]): list of collection names
"""
super(SummaryGradient, self).__init__(self._mapper, regex)
self._coll = collections
def _mapper(self, grad, var):
name = var.op.name
if re.match('tower[0-9]+/', name):
# replicated training, var may come from different towers
return grad
if name not in SummaryGradient._summaried_gradient:
SummaryGradient._summaried_gradient.add(name)
tfv1.summary.histogram(name + '-grad', grad, collections=self._coll)
add_moving_summary(rms(grad, name=name + '/rms'))
return grad
class PrintGradient(MapGradient):
"""
Print the gradients every step with :func:`symbolic_functions.print_stat`.
"""
_printed = set()
# TODO this is global. not good.
def __init__(self, regex='.*'):
"""
Args:
regex(str): same as in :class:`MapGradient`.
"""
super(PrintGradient, self).__init__(self._mapper, regex)
def _mapper(self, grad, var):
name = var.op.name
if name not in PrintGradient._printed:
PrintGradient._printed.add(name)
grad = print_stat(grad, message=name + '-grad')
return grad
class CheckGradient(MapGradient):
"""
Run :func:`tf.check_numerics` for each gradient.
"""
def __init__(self):
super(CheckGradient, self).__init__(self._mapper)
def _mapper(self, grad, var):
# this was very slow.... see #3649
# op = tf.Assert(tf.reduce_all(tf.is_finite(var)), [var], summarize=100)
grad = tf.check_numerics(grad, 'CheckGradient/' + var.op.name)
return grad
class ScaleGradient(MapGradient):
"""
Scale certain gradient by a multiplier.
"""
def __init__(self, multipliers, verbose=True):
"""
Args:
multipliers (tuple or list): tuple of (regex, float), or list of such tuples.
verbose (bool): whether to print logs or not
Example:
Use double learning rate for all the bias (as in caffe), and freeze layer0:
.. code-block:: python
from tensorpack.tfutils import optimizer, gradproc
opt = optimizer.apply_grad_processors(
opt, [gradproc.ScaleGradient(
[('.*/b', 2.), ('layer0/.*', 0.)]
)])
"""
if not isinstance(multipliers, list):
multipliers = [multipliers]
self.multipliers = multipliers
assert verbose in [True, False], verbose
self._verbose = verbose
super(ScaleGradient, self).__init__(self._mapper)
def _mapper(self, grad, var):
varname = var.op.name
for regex, val in self.multipliers:
# always match against the whole name
if not regex.endswith('$'):
regex = regex + '$'
if re.match(regex, varname):
if self._verbose:
logger.info("Gradient of '{}' is multipled by {}".format(varname, val))
if val != 0: # skip zero to speed up
return grad * val
else:
return None
return grad
| 8,395 | 30.683019 | 107 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/export.py | # -*- coding: utf-8 -*-
# File: export.py
"""
A collection of functions to ease the process of exporting
a model for production.
"""
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile
from tensorflow.python.tools import optimize_for_inference_lib
from ..compat import tfv1
from ..input_source import PlaceholderInput
from ..tfutils.common import get_tensors_by_names, get_tf_version_tuple
from ..tfutils.tower import PredictTowerContext
from ..utils import logger
__all__ = ['ModelExporter']
class ModelExporter(object):
"""Export models for inference."""
def __init__(self, config):
"""Initialise the export process.
Args:
config (PredictConfig): the config to use.
The graph will be built with the tower function defined by this `PredictConfig`.
Then the input / output names will be used to export models for inference.
"""
super(ModelExporter, self).__init__()
self.config = config
def export_compact(self, filename, optimize=True, toco_compatible=False):
"""Create a self-contained inference-only graph and write final graph (in pb format) to disk.
Args:
filename (str): path to the output graph
optimize (bool): whether to use TensorFlow's `optimize_for_inference`
to prune and optimize the graph. This does not work on all types of graphs.
toco_compatible (bool): See TensorFlow's
`optimize_for_inference
<https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/tools/optimize_for_inference.py>`_
for details. Only available after TF 1.8.
"""
if toco_compatible:
assert optimize, "toco_compatible is only effective when optimize=True!"
self.graph = self.config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(self.config.input_signature)
with PredictTowerContext(''):
self.config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(self.config.input_names)
output_tensors = get_tensors_by_names(self.config.output_names)
self.config.session_init._setup_graph()
# we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
self.config.session_init._run_init(sess)
dtypes = [n.dtype for n in input_tensors]
# freeze variables to constants
frozen_graph_def = graph_util.convert_variables_to_constants(
sess,
self.graph.as_graph_def(),
[n.name[:-2] for n in output_tensors],
variable_names_whitelist=None,
variable_names_blacklist=None)
# prune unused nodes from graph
if optimize:
toco_args = () if get_tf_version_tuple() < (1, 8) else (toco_compatible, )
frozen_graph_def = optimize_for_inference_lib.optimize_for_inference(
frozen_graph_def,
[n.name[:-2] for n in input_tensors],
[n.name[:-2] for n in output_tensors],
[dtype.as_datatype_enum for dtype in dtypes],
*toco_args)
with gfile.FastGFile(filename, "wb") as f:
f.write(frozen_graph_def.SerializeToString())
logger.info("Output graph written to {}.".format(filename))
def export_serving(self, filename,
tags=None,
signature_name='prediction_pipeline'):
"""
Converts a checkpoint and graph to a servable for TensorFlow Serving.
Use TF's `SavedModelBuilder` to export a trained model without tensorpack dependency.
Args:
filename (str): path for export directory
tags (tuple): tuple of user specified tags. Defaults to just "SERVING".
signature_name (str): name of signature for prediction
Note:
This produces
.. code-block:: none
variables/ # output from the vanilla Saver
variables.data-?????-of-?????
variables.index
saved_model.pb # a `SavedModel` protobuf
Currently, we only support a single signature, which is the general PredictSignatureDef:
https://github.com/tensorflow/serving/blob/master/tensorflow_serving/g3doc/signature_defs.md
"""
if tags is None:
tags = (tf.saved_model.SERVING if get_tf_version_tuple() >= (1, 12)
else tf.saved_model.tag_constants.SERVING, )
self.graph = self.config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(self.config.input_signature)
with PredictTowerContext(''):
self.config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(self.config.input_names)
saved_model = tfv1.saved_model.utils
inputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in input_tensors}
output_tensors = get_tensors_by_names(self.config.output_names)
outputs_signatures = {t.name: saved_model.build_tensor_info(t) for t in output_tensors}
self.config.session_init._setup_graph()
# we cannot use "self.config.session_creator.create_session()" here since it finalizes the graph
sess = tfv1.Session(config=tfv1.ConfigProto(allow_soft_placement=True))
self.config.session_init._run_init(sess)
builder = tfv1.saved_model.builder.SavedModelBuilder(filename)
prediction_signature = tfv1.saved_model.signature_def_utils.build_signature_def(
inputs=inputs_signatures,
outputs=outputs_signatures,
method_name=tfv1.saved_model.signature_constants.PREDICT_METHOD_NAME)
builder.add_meta_graph_and_variables(
sess, list(tags),
signature_def_map={signature_name: prediction_signature})
builder.save()
logger.info("SavedModel created at {}.".format(filename))
| 6,509 | 42.4 | 122 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/tower.py | # -*- coding: utf-8 -*-
# File: tower.py
from abc import ABCMeta, abstractmethod, abstractproperty
import six
from ..compat import tfv1 as tf
from ..utils import logger
from ..utils.argtools import call_only_once
from ..utils.develop import HIDE_DOC
from ..utils.naming import MOVING_SUMMARY_OPS_KEY
from .collection import CollectionGuard
from .common import get_op_or_tensor_by_name, get_op_tensor_name
__all__ = ['get_current_tower_context', 'BaseTowerContext', 'TowerContext',
'TowerFuncWrapper', 'TowerFunc',
'TowerTensorHandle', 'TowerTensorHandles']
_CurrentTowerContext = None
@six.add_metaclass(ABCMeta)
class BaseTowerContext(object):
""" A context where the current model is built in.
You need to use :func:`TowerContext` to create a :class:`BaseTowerContext`.
"""
@HIDE_DOC
def __init__(self, ns_name, vs_name=''):
"""
This is not supposed to be used by users.
You need to use :func:`TowerContext` to create a :class:`BaseTowerContext`.
Args:
ns_name (str): The name scope of the tower.
vs_name (str): Open a new variable scope with this name.
"""
self._name = ns_name
self._vs_name = vs_name
if len(vs_name):
assert len(ns_name), "TowerContext(vs_name) cannot be used with an empty name!"
@abstractproperty
def is_main_training_tower(self):
"""
bool: Whether this tower is the main (i.e., the first) training tower.
"""
pass
@abstractproperty
def has_own_variables(self):
"""
bool: Whether this tower is supposed to have its own trainable variables.
"""
pass
@property
def name(self):
"""
str: The name scope of the tower.
"""
return self._name
@property
def vs_name(self):
"""
str: The variable scope of the tower.
"""
return self._vs_name
@property
def ns_name(self):
"""
str: The name scope of the tower.
"""
return self._name
def get_collection_in_tower(self, key):
"""
From a collection, get items that are __added__ to the collection in this tower.
Note that it works by tracking the collection at the beginning and end of
the tower function.
Therefore it does not guarantee that the items are __created__ in this tower.
"""
return self._collection_guard.get_collection_in_tower(key)
@call_only_once
def _get_scopes(self):
"""
Returns the ns and vs for this tower.
"""
if not len(self._name):
# work around https://github.com/tensorflow/tensorflow/issues/14703
return [tf.variable_scope(tf.get_variable_scope())]
ret = []
if len(self._vs_name):
ret.append(tf.variable_scope(self._vs_name))
else:
# caller should have handled reuse outside of TowerContext
ret.append(tf.variable_scope(tf.get_variable_scope()))
# always clear existing ns # TODO check existing ns
if len(self._name):
ret.append(tf.name_scope(self._name + '/'))
return ret
@abstractmethod
def _keys_to_freeze(self):
pass
def __enter__(self):
global _CurrentTowerContext
assert _CurrentTowerContext is None, "Cannot nest TowerContext!"
_CurrentTowerContext = self
self._collection_guard = CollectionGuard(
self._name,
check_diff=not self.is_main_training_tower,
freeze_keys=self._keys_to_freeze())
self._ctxs = self._get_scopes()
self._ctxs.append(self._collection_guard)
for c in self._ctxs:
c.__enter__()
# check that ns_name is always the same as _name
ns = tf.get_default_graph().get_name_scope()
assert ns == self._name, \
"Name conflict: name_scope inside tower '{}' becomes '{}'!".format(self._name, ns) \
+ " You may need a different name for the tower!"
return self
def __exit__(self, exc_type, exc_val, exc_tb):
global _CurrentTowerContext
_CurrentTowerContext = None
if not self.has_own_variables:
diff_trainable_vars = self._collection_guard.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)
assert len(diff_trainable_vars) == 0, \
"New TRAINABLE_VARIABLES shouldn't be created in {}: ".format(
self._name) + ', '.join([k.name for k in diff_trainable_vars])
for c in self._ctxs[::-1]:
c.__exit__(exc_type, exc_val, exc_tb)
return False
def __str__(self):
return "TowerContext(name={}, is_training={})".format(
self._name, self._is_training)
@property
def is_training(self):
"""
bool: whether the context is training or not
"""
return self._is_training
class TrainTowerContext(BaseTowerContext):
def __init__(self, ns_name, vs_name='', index=0, total=1):
"""
Args:
index (int): index of this tower, only used in training.
total (int): total number of towers to be built.
"""
super(TrainTowerContext, self).__init__(ns_name, vs_name)
self._is_training = True
self.index = int(index)
self.total = int(total)
if self.index > 0:
assert self.total > self.index, "(index, total) = ({}, {})".format(self.index, self.total)
vs = tf.get_variable_scope()
assert vs.name == '', "Cannot nest TrainTowerContext with an existing variable scope!"
if vs_name:
assert not vs.reuse, \
"Cannot create tower {} with vs_name={} under reuse=True!".format(ns_name, vs_name)
self._original_vs_reuse = vs.reuse
@property
def is_main_training_tower(self):
return self.index == 0
@property
def has_own_variables(self):
if self._original_vs_reuse:
return False
return self.index == 0 or len(self._vs_name) > 0
def _keys_to_freeze(self):
if self.index == 0:
return []
return [tf.GraphKeys.SUMMARIES, MOVING_SUMMARY_OPS_KEY]
class PredictTowerContext(BaseTowerContext):
def __init__(self, ns_name, vs_name=''):
super(PredictTowerContext, self).__init__(ns_name, vs_name)
self._is_training = False
self._initial_vs_reuse = tf.get_variable_scope().reuse
@property
def has_own_variables(self):
return not self._initial_vs_reuse
@property
def is_main_training_tower(self):
return False
def _keys_to_freeze(self):
# freeze UPDATE_OPS during inference because they should never be used
return [tf.GraphKeys.SUMMARIES, MOVING_SUMMARY_OPS_KEY, tf.GraphKeys.UPDATE_OPS]
def get_current_tower_context():
"""
When called inside a TowerContext, returns the TowerContext.
Returns:
a :class:`BaseTowerContext` instance or None, if not called under a TowerContext.
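    Example:
    A minimal sketch of how a tower function may branch on the context
    (the dropout call below is only an illustration):
    .. code-block:: python
        ctx = get_current_tower_context()
        if ctx is not None and ctx.is_training:
            x = tf.nn.dropout(x, keep_prob=0.5)   # training-only behavior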
"""
return _CurrentTowerContext
def TowerContext(tower_name, is_training, vs_name=''):
"""
The context for a tower function, containing metadata about the current tower.
Tensorpack trainers use :class:`TowerContext` to manage tower function.
Many tensorpack layers have to be called under a :class:`TowerContext`.
Example:
.. code-block:: python
with TowerContext('', is_training=True):
# call a tensorpack layer or a tower function
"""
if is_training:
return TrainTowerContext(tower_name, vs_name=vs_name)
else:
return PredictTowerContext(tower_name, vs_name=vs_name)
class TowerFunc(object):
"""
A tower function (see
`tutorial on tower function
<http://tensorpack.readthedocs.io/tutorial/extend/trainer.html#tower-trainer>`_)
It keeps track of the name scope, variable scope and input/output tensors
each time the function is called.
:class:`TowerTrainer` needs this so that it knows how to build a predictor.
Conceptually, this class is roughly equivalent to `tf.function` with input signature, introduced in TF 2.0.
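    Example:
    A rough sketch of wrapping and calling a tower function; ``my_model``,
    ``image_tensor`` and ``label_tensor`` are hypothetical:
    .. code-block:: python
        def tower_fn(image, label):
            logits = my_model(image)    # build the graph for one tower
            return tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(labels=label, logits=logits))
        tower_func = TowerFunc(tower_fn, [
            tf.TensorSpec([None, 224, 224, 3], tf.float32, 'image'),
            tf.TensorSpec([None], tf.int64, 'label')])
        with TowerContext('', is_training=True):
            tower_func(image_tensor, label_tensor)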
"""
def __init__(self, tower_fn, input_signature):
"""
Args:
            tower_fn: a function which builds one tower in the graph.
It takes several input tensors and could return anything.
input_signature ([TensorSpec]): list of :class:`tf.TensorSpec`.
They are used to figure out the names for the input tensors.
"""
assert callable(tower_fn), tower_fn
self._inputs_names = [k.name for k in input_signature]
assert len(set(self._inputs_names)) == len(self._inputs_names), \
"Duplicated names in input_signature! " + str(self._inputs_names)
for name in self._inputs_names:
if any(k in name for k in [':', '/', ' ']):
raise ValueError("Invalid input name: '{}'".format(name))
self._tower_fn = tower_fn
self._input_signature = input_signature
self._handles = []
def __new__(cls, tower_fn, _):
# to avoid double-wrapping a function
if isinstance(tower_fn, TowerFunc):
return tower_fn
else:
return super(TowerFunc, cls).__new__(cls)
def __call__(self, *args):
ctx = get_current_tower_context()
assert ctx is not None, "Function must be called under TowerContext!"
output = self._tower_fn(*args)
handle = TowerTensorHandle(ctx, args, output, self._input_signature)
self._handles.append(handle)
return output
@property
def towers(self):
"""
TowerTensorHandles: a :class:`TowerTensorHandles` object, that can
access the tower handles by either indices or names.
"""
return TowerTensorHandles(self._handles)
@property
def input_signature(self):
return self._input_signature
TowerFuncWrapper = TowerFunc
class TowerTensorHandles(object):
"""
Wrap a list of :class:`TowerTensorHandle`,
to support access to them by index or names.
"""
def __init__(self, handles):
self._handles = handles
self._name_to_handle = {k.ns_name: k for k in handles}
def __len__(self):
return len(self._handles)
def __getitem__(self, name_or_index):
"""
Args:
name_or_index (str or int):
Returns:
a :class:`TowerTensorHandle`.
"""
if isinstance(name_or_index, int):
return self._handles[name_or_index]
return self._name_to_handle[name_or_index]
def training(self):
"""
Returns:
A :class:`TowerTensorHandles`, containing only the training towers.
"""
handles = [h for h in self._handles if h.is_training]
return TowerTensorHandles(handles)
def inference(self):
"""
Returns:
A :class:`TowerTensorHandles`, containing only the inference towers.
"""
handles = [h for h in self._handles if not h.is_training]
return TowerTensorHandles(handles)
class TowerTensorHandle(object):
"""
When a function is called multiple times under each tower,
it becomes hard to keep track of the scope and access those tensors
in each tower.
This class provides easy access to the tensors as well as the
inputs/outputs created in each tower.
"""
@HIDE_DOC
def __init__(self, ctx, inputs, outputs, input_signature=None):
self._ctx = ctx
self._extra_tensor_names = {}
if input_signature is not None:
assert len(input_signature) == len(inputs)
self._extra_tensor_names = {
get_op_tensor_name(x.name)[1]: y for x, y in zip(input_signature, inputs)}
self._inputs = inputs
self._outputs = outputs
# TODO: deprecated. Remove them later
self.input = inputs
self.output = outputs
@property
def vs_name(self):
return self._ctx.vs_name
@property
def ns_name(self):
return self._ctx.ns_name
def get_tensor(self, name):
"""
Get a tensor in this tower. The name argument can be:
1. The name of a tensor/variable without any tower prefix.
2. A name in the input signature, if it is used when building the tower.
In the second case, this method will return the tensor that's used as the corresponding
input to the tower. Note that this tensor may have a different name (e.g. may be an output of a queue).
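        Example:
        A small sketch; ``tower_func`` is assumed to be a :class:`TowerFunc` whose
        tower has an input named "image" and a tensor named "linear/output":
        .. code-block:: python
            handle = tower_func.towers[0]
            img = handle.get_tensor('image')     # the tensor fed as input "image"
            out = handle['linear/output']        # same as get_tensor('linear/output')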
"""
name = get_op_tensor_name(name)[1]
if len(self.ns_name):
name_with_ns = self.ns_name + "/" + name
else:
name_with_ns = name
try:
ret = get_op_or_tensor_by_name(name_with_ns)
except KeyError:
if name in self._extra_tensor_names:
return self._extra_tensor_names[name]
else:
if name in self._extra_tensor_names:
mapped_tensor = self._extra_tensor_names[name]
logger.info(
"'{}' may refer to both the Tensor/Placeholder '{}' or the input to the tower '{}'.".format(
name, ret.name, mapped_tensor.name) +
" Assuming it is the input '{}'.".format(mapped_tensor.name))
return mapped_tensor
return ret
# should also allow variables in get_tensor
return self.get_variable(name)
def get_tensors(self, names):
"""
Like :meth:`get_tensor`, but takes a list and returns a list.
"""
return [self.get_tensor(name) for name in names]
def __getitem__(self, name):
"""
The same as :meth:`get_tensor`.
"""
return self.get_tensor(name)
def get_variable(self, name):
"""
Get a variable used in this tower.
The name should not contain the variable scope prefix of the tower.
When the tower has the same variable scope and name scope, this is equivalent to
:meth:`get_tensor`.
"""
name = get_op_tensor_name(name)[1]
if len(self.vs_name):
name_with_vs = self.vs_name + "/" + name
else:
name_with_vs = name
return get_op_or_tensor_by_name(name_with_vs)
def get_variables(self, names):
"""
Like :meth:`get_variable`, but takes a list and returns a list.
"""
return [self.get_variable(name) for name in names]
def get_collection(self, key=None, name=None):
"""
See :meth:`BaseTowerContext.get_collection_in_tower`.
Args:
key (str): the key of the collection
name: deprecated
"""
if name is not None:
logger.warn("TowerTensorHandle.get_collection(name=..) was renamed to (key=..) !")
key = name
return self._ctx.get_collection_in_tower(key)
@property
def inputs(self):
"""
list[Tensor]: The list of input tensors used to build the tower.
"""
return self._inputs
@property
def outputs(self):
"""
list[Tensor]: The outputs returned by the tower function.
"""
return self._outputs
@property
def is_training(self):
return self._ctx.is_training
| 15,572 | 31.04321 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/symbolic_functions.py | # -*- coding: utf-8 -*-
# File: symbolic_functions.py
import tensorflow as tf
from ..compat import tfv1
__all__ = ['print_stat', 'rms']
def print_stat(x, message=None):
""" A simple print Op that might be easier to use than :meth:`tf.Print`.
Use it like: ``x = print_stat(x, message='This is x')``.
"""
if message is None:
message = x.op.name
lst = [tf.shape(x), tf.reduce_mean(x)]
if x.dtype.is_floating:
lst.append(rms(x))
return tf.Print(x, lst + [x], summarize=20,
message=message, name='print_' + x.op.name)
# for internal use only
def rms(x, name=None):
"""
Returns:
root mean square of tensor x.
"""
if name is None:
name = x.op.name + '/rms'
with tfv1.name_scope(None): # name already contains the scope
return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
return tf.sqrt(tf.reduce_mean(tf.square(x)), name=name)
# doesn't hurt to leave it here
def psnr(prediction, ground_truth, maxp=None, name='psnr'):
"""`Peak Signal to Noise Ratio <https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio>`_.
.. math::
PSNR = 20 \cdot \log_{10}(MAX_p) - 10 \cdot \log_{10}(MSE)
Args:
prediction: a :class:`tf.Tensor` representing the prediction signal.
ground_truth: another :class:`tf.Tensor` with the same shape.
        maxp: maximum possible pixel value of the image (255 for 8-bit images)
Returns:
A scalar tensor representing the PSNR
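    Example:
    A minimal sketch for 8-bit images; ``pred`` and ``gt`` are assumed to be
    tensors of the same shape with values in [0, 255]:
    .. code-block:: python
        psnr_op = psnr(pred, gt, maxp=255)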
"""
    if maxp is not None:
        maxp = float(maxp)   # only convert when a peak value is given; None means use MSE only
def log10(x):
with tf.name_scope("log10"):
numerator = tf.log(x)
denominator = tf.log(tf.constant(10, dtype=numerator.dtype))
return numerator / denominator
mse = tf.reduce_mean(tf.square(prediction - ground_truth))
if maxp is None:
psnr = tf.multiply(log10(mse), -10., name=name)
else:
psnr = tf.multiply(log10(mse), -10.)
psnr = tf.add(tf.multiply(20., log10(maxp)), psnr, name=name)
return psnr
| 2,049 | 27.873239 | 96 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/dependency.py |
import tensorflow as tf
from ..utils.argtools import graph_memoized
"""
Utils about parsing dependencies in the graph.
"""
__all__ = [
'dependency_of_targets', 'dependency_of_fetches'
]
@graph_memoized
def dependency_of_targets(targets, op):
"""
    Check whether ``op`` is in the subgraph induced by the dependencies of ``targets``.
The result is memoized.
This is useful if some SessionRunHooks should be run only together with certain ops.
Args:
targets: a tuple of ops or tensors. The targets to find dependencies of.
op (tf.Operation or tf.Tensor):
Returns:
bool: True if any one of `targets` depend on `op`.
"""
# TODO tensorarray? sparsetensor?
if isinstance(op, tf.Tensor):
op = op.op
assert isinstance(op, tf.Operation), op
try:
from tensorflow.contrib.graph_editor import get_backward_walk_ops # deprecated
except ImportError:
from tensorflow.python.ops.op_selector import get_backward_walk_ops
# alternative implementation can use graph_util.extract_sub_graph
dependent_ops = get_backward_walk_ops(targets, control_inputs=True)
return op in dependent_ops
def dependency_of_fetches(fetches, op):
"""
    Check whether ``op`` is in the subgraph induced by the dependencies of ``fetches``.
    ``fetches`` may have a more general structure than ``targets``.
Args:
fetches: An argument to `sess.run`. Nested structure will affect performance.
op (tf.Operation or tf.Tensor):
Returns:
bool: True if any of `fetches` depend on `op`.
"""
try:
from tensorflow.python.client.session import _FetchHandler as FetchHandler
# use the graph of the op, so that this function can be called without being under a default graph
handler = FetchHandler(op.graph, fetches, {})
targets = tuple(handler.fetches() + handler.targets())
except ImportError:
if isinstance(fetches, list):
targets = tuple(fetches)
elif isinstance(fetches, dict):
raise ValueError("Don't know how to parse dictionary to fetch list! "
"This is a bug of tensorpack.")
else:
targets = (fetches, )
return dependency_of_targets(targets, op)
if __name__ == '__main__':
a = tf.random_normal(shape=[3, 3])
b = tf.random_normal(shape=[3, 3])
print(dependency_of_fetches(a, a))
print(dependency_of_fetches([a, b], a))
| 2,442 | 30.727273 | 106 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/model_utils.py | # -*- coding: utf-8 -*-
# File: model_utils.py
# Author: tensorpack contributors
from ..compat import tfv1 as tf
from tabulate import tabulate
from termcolor import colored
from .common import get_op_tensor_name
from ..utils import logger
__all__ = []
def describe_trainable_vars():
"""
Print a description of the current model parameters.
Skip variables starting with "tower", as they are just duplicates built by data-parallel logic.
"""
train_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
if len(train_vars) == 0:
logger.warn("No trainable variables in the graph!")
return
total = 0
total_bytes = 0
data = []
for v in train_vars:
if v.name.startswith('tower'):
continue
shape = v.get_shape()
ele = shape.num_elements()
if ele is None:
logger.warn("Shape of variable {} is not fully defined but {}.".format(v.name, shape))
ele = 0
try:
shape = shape.as_list()
except ValueError:
shape = '<unknown>'
total += ele
total_bytes += ele * v.dtype.size
data.append([get_op_tensor_name(v.name)[0], shape, ele, v.device, v.dtype.base_dtype.name])
headers = ['name', 'shape', '#elements', 'device', 'dtype']
dtypes = list({x[4] for x in data})
if len(dtypes) == 1 and dtypes[0] == "float32":
# don't log the dtype if all vars are float32 (default dtype)
for x in data:
del x[4]
del headers[4]
devices = {x[3] for x in data}
if len(devices) == 1:
# don't log the device if all vars on the same device
for x in data:
del x[3]
del headers[3]
table = tabulate(data, headers=headers)
size_mb = total_bytes / 1024.0**2
summary_msg = colored(
"\nNumber of trainable variables: {}".format(len(data)) +
"\nNumber of parameters (elements): {}".format(total) +
"\nStorage space needed for all trainable variables: {:.02f}MB".format(size_mb),
'cyan')
logger.info(colored("List of Trainable Variables: \n", 'cyan') + table + summary_msg)
def get_shape_str(tensors):
"""
Internally used by layer registry, to print shapes of inputs/outputs of layers.
Args:
tensors (list or tf.Tensor): a tensor or a list of tensors
Returns:
str: a string to describe the shape
"""
if isinstance(tensors, (list, tuple)):
for v in tensors:
assert isinstance(v, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(v))
shape_str = ", ".join(map(get_shape_str, tensors))
else:
assert isinstance(tensors, (tf.Tensor, tf.Variable)), "Not a tensor: {}".format(type(tensors))
shape_str = str(tensors.get_shape().as_list()).replace("None", "?")
return shape_str
| 2,866 | 31.954023 | 102 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/sesscreate.py | # -*- coding: utf-8 -*-
# File: sesscreate.py
from ..compat import tfv1 as tf
from ..utils import logger
from .common import get_default_sess_config
__all__ = ['NewSessionCreator', 'ReuseSessionCreator', 'SessionCreatorAdapter']
"""
A SessionCreator should:
create the session
initialize all variables
return a session that is ready to use
not finalize the graph
"""
_WRN1 = """User-provided custom session config may not work due to TF bugs. If you saw logs like
```
tensorflow/core/common_runtime/gpu/gpu_device.cc:1433] Found device 0 with properties:
```
before this line, then your GPU has been initialized and custom GPU options may not take effect. """
_WRN2 = """To workaround this issue, you can do one of the following:
1. Avoid initializing the GPU too early. Find code that initializes the GPU and skip it.
   Typical examples are: creating a session, checking GPU availability, or checking the number of GPUs.
2. Manually set your GPU options earlier. You can create a session with custom
GPU options at the beginning of your program, as described in
https://github.com/tensorpack/tensorpack/issues/497
"""
class NewSessionCreator(tf.train.SessionCreator):
def __init__(self, target='', config=None):
"""
Args:
target, config: same as :meth:`Session.__init__()`.
config: a :class:`tf.ConfigProto` instance, defaults to :func:`tfutils.get_default_sess_config()`
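        Example:
        A sketch of supplying a custom config (subject to the caveats in the
        warnings logged when a user-provided config is detected):
        .. code-block:: python
            config = tf.ConfigProto(allow_soft_placement=True)
            config.gpu_options.allow_growth = True
            sess = NewSessionCreator(config=config).create_session()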
"""
self.target = target
if config is None:
# distributed trainer doesn't support user-provided config
# we set this attribute so that they can check
self.user_provided_config = False
config = get_default_sess_config()
else:
self.user_provided_config = True
logger.warn(_WRN1)
logger.warn(_WRN2)
self.config = config
def create_session(self):
sess = tf.Session(target=self.target, config=self.config)
def blocking_op(x):
"""
Whether an op is possibly blocking.
"""
if x.op_def is not None and not x.op_def.is_stateful:
return False
if "Dequeue" in x.type or "Enqueue" in x.type:
return True
if "Unstage" in x.type:
return True
if x.type in ["ZMQPull"]:
return True
return False
def run(op):
try:
from tensorflow.contrib.graph_editor import get_backward_walk_ops # deprecated
except ImportError:
from tensorflow.python.ops.op_selector import get_backward_walk_ops
deps = get_backward_walk_ops(op, control_inputs=True)
for dep_op in deps:
if blocking_op(dep_op):
logger.warn(
"Initializer '{}' depends on a blocking op '{}'. "
"This initializer is likely to hang!".format(
op.name, dep_op.name))
sess.run(op)
run(tf.global_variables_initializer())
run(tf.local_variables_initializer())
run(tf.tables_initializer())
return sess
class ReuseSessionCreator(tf.train.SessionCreator):
"""
Returns an existing session.
"""
def __init__(self, sess):
"""
Args:
sess (tf.Session): the session to reuse
"""
self.sess = sess
def create_session(self):
return self.sess
class SessionCreatorAdapter(tf.train.SessionCreator):
"""
Apply a function on the output of a SessionCreator. Can be used to create a debug session.
Note:
Since TF 1.6, debug session may not work properly with Monitored session.
This is a tensorflow bug. To use tfdbg, use the :class:`TFLocalCLIDebugHook` callback instead.
"""
def __init__(self, session_creator, func):
"""
Args:
session_creator (tf.train.SessionCreator): a session creator
func (tf.Session -> tf.Session): takes a session created by
``session_creator``, and return a new session to be returned by ``self.create_session``
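        Example:
        A sketch of wrapping the created session with the TF1 CLI debugger
        (assuming ``tensorflow.python.debug`` is available in your TF build):
        .. code-block:: python
            from tensorflow.python import debug as tf_debug
            creator = SessionCreatorAdapter(
                NewSessionCreator(),
                lambda sess: tf_debug.LocalCLIDebugWrapperSession(sess))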
"""
self._creator = session_creator
self._func = func
def create_session(self):
sess = self._creator.create_session()
return self._func(sess)
| 4,379 | 32.692308 | 109 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/sessinit.py | # -*- coding: utf-8 -*-
# File: sessinit.py
import os
import numpy as np
import six
from ..compat import tfv1 as tf
from ..utils import logger
from .common import get_op_tensor_name
from .varmanip import SessionUpdate, get_checkpoint_path, get_savename_from_varname, is_training_name
__all__ = ['SessionInit', 'ChainInit',
'SaverRestore', 'SaverRestoreRelaxed', 'DictRestore',
'JustCurrentSession', 'get_model_loader', 'SmartInit']
class SessionInit(object):
""" Base class for utilities to load variables to a (existing) session. """
def init(self, sess):
"""
Initialize a session
Args:
sess (tf.Session): the session
"""
self._setup_graph()
self._run_init(sess)
def _setup_graph(self):
pass
def _run_init(self, sess):
pass
class JustCurrentSession(SessionInit):
""" This is a no-op placeholder"""
pass
class CheckpointReaderAdapter(object):
"""
An adapter to work around old checkpoint format, where the keys are op
names instead of tensor names (with :0).
"""
def __init__(self, reader):
self._reader = reader
m = self._reader.get_variable_to_shape_map()
self._map = {k if k.endswith(':0') else k + ':0': v
for k, v in six.iteritems(m)}
def get_variable_to_shape_map(self):
return self._map
def get_tensor(self, name):
if self._reader.has_tensor(name):
return self._reader.get_tensor(name)
if name in self._map:
assert name.endswith(':0'), name
name = name[:-2]
return self._reader.get_tensor(name)
def has_tensor(self, name):
return name in self._map
# some checkpoint might not have ':0'
def get_real_name(self, name):
if self._reader.has_tensor(name):
return name
assert self.has_tensor(name)
return name[:-2]
class MismatchLogger(object):
def __init__(self, exists, nonexists):
self._exists = exists
self._nonexists = nonexists
self._names = []
def add(self, name):
self._names.append(get_op_tensor_name(name)[0])
def log(self):
if len(self._names):
logger.warn("The following variables are in the {}, but not found in the {}: {}".format(
self._exists, self._nonexists, ', '.join(self._names)))
class SaverRestore(SessionInit):
"""
Restore a tensorflow checkpoint saved by :class:`tf.train.Saver` or :class:`ModelSaver`.
"""
def __init__(self, model_path, prefix=None, ignore=()):
"""
Args:
model_path (str): a model name (model-xxxx) or a ``checkpoint`` file.
prefix (str): during restore, add a ``prefix/`` for every variable in this checkpoint.
ignore (tuple[str]): tensor names that should be ignored during loading, e.g. learning-rate
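        Example:
        A small sketch; the checkpoint path is made up:
        .. code-block:: python
            init = SaverRestore('train_log/run1/model-10000', ignore=('learning_rate',))
            init.init(sess)   # or pass it as the session_init of a trainer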
"""
if model_path.endswith('.npy') or model_path.endswith('.npz'):
logger.warn("SaverRestore expect a TF checkpoint, but got a model path '{}'.".format(model_path) +
" To load from a dict, use 'DictRestore'.")
model_path = get_checkpoint_path(model_path)
self.path = model_path # attribute used by AutoResumeTrainConfig!
self.prefix = prefix
self.ignore = [i if i.endswith(':0') else i + ':0' for i in ignore]
def _setup_graph(self):
dic = self._get_restore_dict()
self.saver = tf.train.Saver(var_list=dic, name=str(id(dic)))
def _run_init(self, sess):
logger.info("Restoring checkpoint from {} ...".format(self.path))
self.saver.restore(sess, self.path)
@staticmethod
def _read_checkpoint_vars(model_path):
""" return a set of strings """
reader = tf.train.NewCheckpointReader(model_path)
reader = CheckpointReaderAdapter(reader) # use an adapter to standardize the name
ckpt_vars = reader.get_variable_to_shape_map().keys()
return reader, set(ckpt_vars)
def _match_vars(self, func):
reader, chkpt_vars = SaverRestore._read_checkpoint_vars(self.path)
graph_vars = tf.global_variables()
chkpt_vars_used = set()
mismatch = MismatchLogger('graph', 'checkpoint')
for v in graph_vars:
name = get_savename_from_varname(v.name, varname_prefix=self.prefix)
if name in self.ignore and reader.has_tensor(name):
logger.info("Variable {} in the graph will not be loaded from the checkpoint!".format(name))
else:
if reader.has_tensor(name):
func(reader, name, v)
chkpt_vars_used.add(name)
else:
# use tensor name (instead of op name) for logging, to be consistent with the reverse case
if not is_training_name(v.name):
mismatch.add(v.name)
mismatch.log()
mismatch = MismatchLogger('checkpoint', 'graph')
if len(chkpt_vars_used) < len(chkpt_vars):
unused = chkpt_vars - chkpt_vars_used
for name in sorted(unused):
if not is_training_name(name):
mismatch.add(name)
mismatch.log()
def _get_restore_dict(self):
var_dict = {}
def f(reader, name, v):
name = reader.get_real_name(name)
assert name not in var_dict, "Restore conflict: {} and {}".format(v.name, var_dict[name].name)
var_dict[name] = v
self._match_vars(f)
return var_dict
class SaverRestoreRelaxed(SaverRestore):
""" Same as :class:`SaverRestore`, but has more relaxed constraints.
    It allows upcasting certain variables, or reshaping certain
variables when there is a mismatch that can be fixed.
When variable shape and value shape do not match, it will print a
warning but will not crash.
Another advantage is that it doesn't add any new ops to the graph.
"""
def _run_init(self, sess):
logger.info(
"Restoring checkpoint from {} ...".format(self.path))
matched_pairs = []
def f(reader, name, v):
val = reader.get_tensor(name)
val = SessionUpdate.relaxed_value_for_var(val, v, ignore_mismatch=True)
if val is not None:
matched_pairs.append((v, val))
with sess.as_default():
self._match_vars(f)
upd = SessionUpdate(sess, [x[0] for x in matched_pairs])
upd.update({x[0].name: x[1] for x in matched_pairs})
class DictRestore(SessionInit):
"""
Restore variables from a dictionary.
"""
def __init__(self, variable_dict, ignore_mismatch=False):
"""
Args:
variable_dict (dict): a dict of {name: value}
ignore_mismatch (bool): ignore failures when the value and the
                variable do not match in their shapes.
If False, it will throw exception on such errors.
If True, it will only print a warning.
"""
assert isinstance(variable_dict, dict), type(variable_dict)
# use varname (with :0) for consistency
self._prms = {get_op_tensor_name(n)[1]: v for n, v in six.iteritems(variable_dict)}
self._ignore_mismatch = ignore_mismatch
def _run_init(self, sess):
variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
variable_names_list = [k.name for k in variables]
variable_names = set(variable_names_list)
param_names = set(six.iterkeys(self._prms))
# intersect has the original ordering of variables
intersect = [v for v in variable_names_list if v in param_names]
# use opname (without :0) for clarity in logging
logger.info("Variables to restore from dict: {}".format(
', '.join(get_op_tensor_name(x)[0] for x in intersect)))
mismatch = MismatchLogger('graph', 'dict')
for k in sorted(variable_names - param_names):
if not is_training_name(k):
mismatch.add(k)
mismatch.log()
mismatch = MismatchLogger('dict', 'graph')
for k in sorted(param_names - variable_names):
mismatch.add(k)
mismatch.log()
upd = SessionUpdate(sess, [v for v in variables if v.name in intersect], ignore_mismatch=self._ignore_mismatch)
logger.info("Restoring {} variables from dict ...".format(len(intersect)))
upd.update({name: value for name, value in six.iteritems(self._prms) if name in intersect})
class ChainInit(SessionInit):
"""
Initialize a session by a list of :class:`SessionInit` instance, executed one by one.
This can be useful for, e.g., loading several models from different files
to form a composition of models.
"""
def __init__(self, sess_inits):
"""
Args:
sess_inits (list[SessionInit]): list of :class:`SessionInit` instances.
"""
self.inits = sess_inits
def _setup_graph(self):
for i in self.inits:
i._setup_graph()
def _run_init(self, sess):
for i in self.inits:
i._run_init(sess)
def SmartInit(obj, *, ignore_mismatch=False):
"""
Create a :class:`SessionInit` to be loaded to a session,
automatically from any supported objects, with some smart heuristics.
The object can be:
+ A TF checkpoint
+ A dict of numpy arrays
+ A npz file, to be interpreted as a dict
+ An empty string or None, in which case the sessinit will be a no-op
+ A list of supported objects, to be initialized one by one
Args:
obj: a supported object
ignore_mismatch (bool): ignore failures when the value and the
            variable do not match in their shapes.
If False, it will throw exception on such errors.
If True, it will only print a warning.
Returns:
SessionInit:
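    Example:
    A few sketches of accepted inputs (the paths below are made up):
    .. code-block:: python
        SmartInit("train_log/run1/model-10000")    # a TF checkpoint prefix
        SmartInit("pretrained.npz")                # an npz file of {name: value}
        SmartInit({"conv1/W:0": my_array})         # a dict of numpy arrays
        SmartInit(["pretrained.npz", "train_log/run1/model-10000"])  # chained one by one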
"""
if not obj:
return JustCurrentSession()
if isinstance(obj, list):
return ChainInit([SmartInit(x, ignore_mismatch=ignore_mismatch) for x in obj])
if isinstance(obj, six.string_types):
obj = os.path.expanduser(obj)
if obj.endswith(".npy") or obj.endswith(".npz"):
assert tf.gfile.Exists(obj), "File {} does not exist!".format(obj)
filename = obj
logger.info("Loading dictionary from {} ...".format(filename))
if filename.endswith('.npy'):
obj = np.load(filename, encoding='latin1').item()
elif filename.endswith('.npz'):
obj = dict(np.load(filename))
elif len(tf.gfile.Glob(obj + "*")):
# Assume to be a TF checkpoint.
# A TF checkpoint must be a prefix of an actual file.
return (SaverRestoreRelaxed if ignore_mismatch else SaverRestore)(obj)
else:
raise ValueError("Invalid argument to SmartInit: " + obj)
if isinstance(obj, dict):
return DictRestore(obj, ignore_mismatch=ignore_mismatch)
raise ValueError("Invalid argument to SmartInit: " + type(obj))
get_model_loader = SmartInit
| 11,260 | 35.092949 | 119 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/varreplace.py | # -*- coding: utf-8 -*-
# File: varreplace.py
# Credit: Qinyao He
from contextlib import contextmanager
from ..compat import tfv1 as tf
from .common import get_tf_version_tuple
__all__ = ['custom_getter_scope', 'freeze_variables', 'remap_variables']
@contextmanager
def custom_getter_scope(custom_getter):
"""
Args:
custom_getter: the same as in :func:`tf.get_variable`
Returns:
The current variable scope with a custom_getter.
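    Example:
    A sketch of a getter that simply logs every variable created or reused
    under the scope:
    .. code-block:: python
        def my_getter(getter, *args, **kwargs):
            v = getter(*args, **kwargs)
            print("get_variable called for", v.name)
            return v
        with custom_getter_scope(my_getter):
            x = FullyConnected('fc', x, 1000)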
"""
scope = tf.get_variable_scope()
if get_tf_version_tuple() >= (1, 5):
with tf.variable_scope(
scope, custom_getter=custom_getter,
auxiliary_name_scope=False):
yield
else:
ns = tf.get_default_graph().get_name_scope()
with tf.variable_scope(
scope, custom_getter=custom_getter):
with tf.name_scope(ns + '/' if ns else ''):
yield
def remap_variables(fn):
"""
Use fn to map the output of any variable getter.
Args:
fn (tf.Variable -> tf.Tensor)
Returns:
The current variable scope with a custom_getter that maps
all the variables by fn.
Example:
.. code-block:: python
from tensorpack.tfutils import varreplace
with varreplace.remap_variables(lambda var: quantize(var)):
x = FullyConnected('fc', x, 1000) # fc/{W,b} will be quantized
"""
def custom_getter(getter, *args, **kwargs):
v = getter(*args, **kwargs)
return fn(v)
return custom_getter_scope(custom_getter)
def freeze_variables(stop_gradient=True, skip_collection=False):
"""
Return a context to freeze variables,
by wrapping ``tf.get_variable`` with a custom getter.
It works by either applying ``tf.stop_gradient`` on the variables,
or keeping them out of the ``TRAINABLE_VARIABLES`` collection, or
both. Both options have their own pros and cons.
Example:
.. code-block:: python
from tensorpack.tfutils import varreplace
        with varreplace.freeze_variables(stop_gradient=False, skip_collection=True):
x = FullyConnected('fc', x, 1000) # fc/* will not be trained
Args:
stop_gradient (bool): if True, variables returned from `get_variable`
will be wrapped with `tf.stop_gradient`.
Note that the created variables may still have gradient when accessed
by other approaches (e.g. by name, or by collection).
For example, they may still have a gradient in weight decay.
Also note that this makes `tf.get_variable` returns a Tensor instead of a Variable,
which may break existing contract.
Therefore, it's recommended to use the `skip_collection` option instead.
skip_collection (bool): if True, do not add the variable to
``TRAINABLE_VARIABLES`` collection, but to ``MODEL_VARIABLES``
collection. As a result they will not be trained by default.
Note:
`stop_gradient` only stops variables returned by `get_variable` **within the context** to
contribute no gradient in this context. Therefore it may not completely freeze the variables.
For example:
1. If a variable is created, or reused outside of the context, it can still contribute to the
gradient of other tensors.
        2. If a frozen variable is accessed by other approaches (e.g., by name or by collection),
it can still contribute to the gradient of other tensors.
For example, weight decay cannot be stopped by a `stop_gradient` context.
`skip_collection` has to be used the first time the variable is created.
Once `skip_collection` is used, the variable is not a trainable variable anymore,
        and will be completely frozen from gradient updates in tensorpack's single-cost trainer.
        Choose the options carefully depending on what you need.
"""
def custom_getter(getter, *args, **kwargs):
trainable = kwargs.get('trainable', True)
name = args[0] if len(args) else kwargs.get('name')
if skip_collection:
kwargs['trainable'] = False
v = getter(*args, **kwargs)
# do not perform unnecessary changes if it's not originally trainable
# otherwise the variable may get added to MODEL_VARIABLES twice
if trainable and skip_collection:
tf.add_to_collection(tf.GraphKeys.MODEL_VARIABLES, v)
if trainable and stop_gradient:
v = tf.stop_gradient(v, name='freezed_' + name)
return v
return custom_getter_scope(custom_getter)
| 4,647 | 37.413223 | 101 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/unit_tests.py | # -*- coding: utf-8 -*-
import unittest
import tensorflow as tf
from ..utils import logger
from .scope_utils import under_name_scope
class ScopeUtilsTest(unittest.TestCase):
@under_name_scope(name_scope='s')
def _f(self, check=True):
if check:
assert tf.get_default_graph().get_name_scope().endswith('s')
return True
def test_under_name_scope(self):
self.assertTrue(self._f())
with self.assertRaises(AssertionError):
self._f() # name conflict
def test_under_name_scope_warning(self):
x = tf.placeholder(tf.float32, [3])
tf.nn.relu(x, name='s')
with self.assertLogs(logger=logger._logger, level='WARNING'):
self._f(check=False, name_scope='s')
if __name__ == '__main__':
unittest.main()
| 808 | 24.28125 | 72 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/common.py | # -*- coding: utf-8 -*-
# File: common.py
from collections import defaultdict
from six.moves import map
from tabulate import tabulate
import os
import sys
import psutil
import tensorflow as tf
import numpy as np
from ..compat import tfv1
from ..utils.argtools import graph_memoized
from ..utils.utils import find_library_full_path as find_library
from ..utils.nvml import NVMLContext
from ..libinfo import __git_version__
__all__ = ['get_default_sess_config',
'get_global_step_value',
'get_global_step_var',
'get_tf_version_tuple',
'collect_env_info'
# 'get_op_tensor_name',
# 'get_tensors_by_names',
# 'get_op_or_tensor_by_name',
]
def get_default_sess_config(mem_fraction=0.99):
"""
Return a tf.ConfigProto to use as default session config.
You can modify the returned config to fit your needs.
Args:
mem_fraction(float): see the `per_process_gpu_memory_fraction` option
in TensorFlow's GPUOptions protobuf:
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/protobuf/config.proto
Returns:
tf.ConfigProto: the config to use.
"""
conf = tfv1.ConfigProto()
conf.allow_soft_placement = True
# conf.log_device_placement = True
conf.intra_op_parallelism_threads = 1
conf.inter_op_parallelism_threads = 0
# TF benchmark use cpu_count() - gpu_thread_count(), e.g. 80 - 8 * 2
# Didn't see much difference.
conf.gpu_options.per_process_gpu_memory_fraction = mem_fraction
# This hurt performance of large data pipeline:
# https://github.com/tensorflow/benchmarks/commit/1528c46499cdcff669b5d7c006b7b971884ad0e6
# conf.gpu_options.force_gpu_compatible = True
conf.gpu_options.allow_growth = True
# from tensorflow.core.protobuf import rewriter_config_pb2 as rwc
# conf.graph_options.rewrite_options.memory_optimization = \
# rwc.RewriterConfig.HEURISTICS
# May hurt performance?
# conf.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
# conf.graph_options.place_pruned_graph = True
return conf
@graph_memoized
def get_global_step_var():
"""
Returns:
tf.Tensor: the global_step variable in the current graph. Create if doesn't exist.
"""
scope = tfv1.VariableScope(reuse=False, name='') # the root vs
with tfv1.variable_scope(scope):
var = tfv1.train.get_or_create_global_step()
return var
def get_global_step_value():
"""
Returns:
int: global_step value in current graph and session
Has to be called under a default session.
"""
return tfv1.train.global_step(
tfv1.get_default_session(),
get_global_step_var())
def get_op_tensor_name(name):
"""
Will automatically determine if ``name`` is a tensor name (ends with ':x')
or a op name.
If it is an op name, the corresponding tensor name is assumed to be ``op_name + ':0'``.
Args:
name(str): name of an op or a tensor
Returns:
tuple: (op_name, tensor_name)
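    Example:
    For instance (illustrative names):
    .. code-block:: python
        get_op_tensor_name('conv1/W')     # -> ('conv1/W', 'conv1/W:0')
        get_op_tensor_name('conv1/W:0')   # -> ('conv1/W', 'conv1/W:0')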
"""
if len(name) >= 3 and name[-2] == ':':
return name[:-2], name
else:
return name, name + ':0'
def get_tensors_by_names(names):
"""
Get a list of tensors in the default graph by a list of names.
Args:
names (list):
"""
ret = []
G = tfv1.get_default_graph()
for n in names:
opn, varn = get_op_tensor_name(n)
ret.append(G.get_tensor_by_name(varn))
return ret
def get_op_or_tensor_by_name(name):
"""
    Get either tf.Operation or tf.Tensor from names.
Args:
name (list[str] or str): names of operations or tensors.
Raises:
KeyError, if the name doesn't exist
"""
G = tfv1.get_default_graph()
def f(n):
if len(n) >= 3 and n[-2] == ':':
return G.get_tensor_by_name(n)
else:
return G.get_operation_by_name(n)
if not isinstance(name, list):
return f(name)
else:
return list(map(f, name))
def gpu_available_in_session():
sess = tfv1.get_default_session()
for dev in sess.list_devices():
if dev.device_type.lower() == 'gpu':
return True
return False
def get_tf_version_tuple():
"""
Return TensorFlow version as a 2-element tuple (for comparison).
"""
return tuple(map(int, tf.__version__.split('.')[:2]))
def collect_env_info():
"""
Returns:
        str - a table containing important information about the environment
"""
data = []
data.append(("sys.platform", sys.platform))
data.append(("Python", sys.version.replace("\n", "")))
data.append(("Tensorpack", __git_version__))
data.append(("Numpy", np.__version__))
data.append(("TensorFlow", tfv1.VERSION + "/" + tfv1.GIT_VERSION))
data.append(("TF Compiler Version", tfv1.COMPILER_VERSION))
has_cuda = tf.test.is_built_with_cuda()
data.append(("TF CUDA support", has_cuda))
try:
from tensorflow.python.framework import test_util
data.append(("TF MKL support", test_util.IsMklEnabled()))
except Exception:
pass
try:
from tensorflow.python.framework import test_util
data.append(("TF XLA support", test_util.is_xla_enabled()))
except Exception:
pass
if has_cuda:
data.append(("Nvidia Driver", find_library("nvidia-ml")))
data.append(("CUDA", find_library("cudart")))
data.append(("CUDNN", find_library("cudnn")))
data.append(("NCCL", find_library("nccl")))
# List devices with NVML
data.append(
("CUDA_VISIBLE_DEVICES",
os.environ.get("CUDA_VISIBLE_DEVICES", "Unspecified")))
try:
devs = defaultdict(list)
with NVMLContext() as ctx:
for idx, dev in enumerate(ctx.devices()):
devs[dev.name()].append(str(idx))
for devname, devids in devs.items():
data.append(
("GPU " + ",".join(devids), devname))
except Exception:
data.append(("GPU", "Not found with NVML"))
vram = psutil.virtual_memory()
data.append(("Free RAM", "{:.2f}/{:.2f} GB".format(vram.available / 1024**3, vram.total / 1024**3)))
data.append(("CPU Count", psutil.cpu_count()))
# Other important dependencies:
try:
import horovod
data.append(("Horovod", horovod.__version__))
except ImportError:
pass
try:
import cv2
data.append(("cv2", cv2.__version__))
except ImportError:
pass
import msgpack
data.append(("msgpack", ".".join([str(x) for x in msgpack.version])))
has_prctl = True
try:
import prctl
_ = prctl.set_pdeathsig # noqa
except Exception:
has_prctl = False
data.append(("python-prctl", has_prctl))
return tabulate(data)
if __name__ == '__main__':
print(collect_env_info())
| 7,032 | 27.132 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/collection.py | # -*- coding: utf-8 -*-
# File: collection.py
from contextlib import contextmanager
from copy import copy
import six
from ..compat import tfv1 as tf
from ..utils import logger
from ..utils.argtools import memoized
__all__ = ['backup_collection',
'restore_collection',
'freeze_collection']
def backup_collection(keys=None):
"""
Args:
keys (list): list of collection keys to backup.
Defaults to all keys in the graph.
Returns:
dict: the backup
"""
if keys is None:
keys = tf.get_default_graph().get_all_collection_keys()
ret = {}
assert isinstance(keys, (list, tuple, set))
for k in keys:
ret[k] = copy(tf.get_collection(k))
return ret
def restore_collection(backup):
"""
Restore from a collection backup.
Args:
backup (dict):
"""
for k, v in six.iteritems(backup):
del tf.get_collection_ref(k)[:]
tf.get_collection_ref(k).extend(v)
@contextmanager
def freeze_collection(keys):
"""
Args:
keys(list): list of collection keys to freeze.
Returns:
        a context where the collections are in the end restored to their initial state.
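    Example:
    A sketch: changes made to the frozen collections inside the block are
    discarded on exit (``build_my_subgraph`` is hypothetical):
    .. code-block:: python
        with freeze_collection([tf.GraphKeys.UPDATE_OPS]):
            build_my_subgraph()   # UPDATE_OPS added here will not persist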
"""
backup = backup_collection(keys)
yield
restore_collection(backup)
@memoized
def get_inverse_graphkeys():
ret = {}
for name in dir(tf.GraphKeys):
if name.startswith('_'):
continue
if name in ['VARIABLES']: # will produce deprecated warning
continue
ret[getattr(tf.GraphKeys, name)] = "tf.GraphKeys.{}".format(name)
return ret
class CollectionGuard(object):
"""
    A context to track collection changes in a tower.
"""
original = None
def __init__(self, name, check_diff,
freeze_keys=(),
diff_whitelist=None):
"""
Args:
name (str): name of the tower
check_diff (bool): whether to check and print about collection change
when leaving this guard.
freeze_keys (list): list of keys to backup when entering and restore when leaving this guard.
diff_whitelist (list): list of keys to ignore, when check_diff is True.
Defaults to some collections that are normally changed,
including variables, losses, contexts, queue runners.
"""
self._name = name
self._check_diff = check_diff
if diff_whitelist is None:
diff_whitelist = CollectionGuard._default_diff_whitelist()
self._whitelist = set(diff_whitelist)
self._freeze_keys = freeze_keys
self._inverse_graphkeys = get_inverse_graphkeys()
@staticmethod
def _default_diff_whitelist():
ret = [tf.GraphKeys.TRAINABLE_VARIABLES,
tf.GraphKeys.GLOBAL_VARIABLES,
tf.GraphKeys.QUEUE_RUNNERS,
tf.GraphKeys.LOCAL_VARIABLES]
for newkey in ['COND_CONTEXT', 'WHILE_CONTEXT', 'LOSSES']:
if hasattr(tf.GraphKeys, newkey):
ret.append(getattr(tf.GraphKeys, newkey))
return ret
def _key_name(self, name):
return self._inverse_graphkeys.get(name, name)
def __enter__(self):
self.original = backup_collection()
self._freeze_backup = backup_collection(self._freeze_keys)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
new_coll = backup_collection()
if self._check_diff:
self._print_diff(new_coll)
self._restore_freeze(new_coll)
return False
def _print_diff(self, new):
newly_created = []
size_change = []
for k, v in six.iteritems(new):
if k in self._whitelist or k in self._freeze_keys:
continue
if k not in self.original:
newly_created.append((self._key_name(k), len(v)))
else:
old_v = self.original[k]
if len(old_v) != len(v):
size_change.append((self._key_name(k), len(old_v), len(v)))
if newly_created:
logger.info(
"New collections created in tower {}: ".format(self._name) +
', '.join(["{} of size {}".format(key, size) for key, size in newly_created]))
if size_change:
logger.info(
"Size of these collections were changed in {}: {}".format(
self._name, ', '.join(
map(lambda t: "({}: {}->{})".format(*t),
size_change))))
def _restore_freeze(self, new):
size_change = []
for k, v in six.iteritems(self._freeze_backup):
newv = new.get(k, [])
if len(v) != len(newv):
size_change.append((self._key_name(k), len(v), len(newv)))
if size_change:
logger.info(
"These collections were modified but restored in {}: {}".format(
self._name, ', '.join(
map(lambda t: "({}: {}->{})".format(*t),
size_change))))
restore_collection(self._freeze_backup)
def get_collection_in_tower(self, key):
"""
Get items from this collection that are added in the current tower.
"""
new = tf.get_collection(key)
old = set(self.original.get(key, []))
# persist the order in new
return [x for x in new if x not in old]
| 5,532 | 30.4375 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/distributed.py | # -*- coding: utf-8 -*-
# File: distributed.py
import tensorflow as tf
def get_distributed_session_creator(server):
"""
Args:
server (tf.train.Server):
Returns:
tf.train.SessionCreator
"""
server_def = server.server_def
is_chief = (server_def.job_name == 'worker') and (server_def.task_index == 0)
init_op = tf.global_variables_initializer()
local_init_op = tf.local_variables_initializer()
ready_op = tf.report_uninitialized_variables()
ready_for_local_init_op = tf.report_uninitialized_variables(tf.global_variables())
sm = tf.train.SessionManager(
local_init_op=local_init_op,
ready_op=ready_op,
ready_for_local_init_op=ready_for_local_init_op,
graph=tf.get_default_graph())
# to debug wrong variable collection
# from pprint import pprint
# print("GLOBAL:")
# pprint([(k.name, k.device) for k in tf.global_variables()])
# print("LOCAL:")
# pprint([(k.name, k.device) for k in tf.local_variables()])
class _Creator(tf.train.SessionCreator):
def create_session(self):
if is_chief:
return sm.prepare_session(master=server.target, init_op=init_op)
else:
tf.logging.set_verbosity(tf.logging.INFO) # print message about uninitialized vars
ret = sm.wait_for_session(master=server.target)
tf.logging.set_verbosity(tf.logging.WARN)
return ret
return _Creator()
| 1,503 | 30.333333 | 100 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/varmanip.py | # -*- coding: utf-8 -*-
# File: varmanip.py
import numpy as np
import os
import pprint
import six
import tensorflow as tf
from ..compat import tfv1
from ..utils import logger
from .common import get_op_tensor_name
__all__ = ['SessionUpdate', 'dump_session_params',
'load_chkpt_vars', 'save_chkpt_vars', 'get_checkpoint_path']
def get_savename_from_varname(
varname, varname_prefix=None,
savename_prefix=None):
"""
Args:
varname(str): a variable name in the graph
varname_prefix(str): an optional prefix that may need to be removed in varname
savename_prefix(str): an optional prefix to append to all savename
Returns:
str: the name used to save the variable
"""
name = varname
if varname_prefix is not None \
and name.startswith(varname_prefix):
name = name[len(varname_prefix) + 1:]
if savename_prefix is not None:
name = savename_prefix + '/' + name
return name
class SessionUpdate(object):
""" Update the variables in a session """
def __init__(self, sess, vars_to_update, ignore_mismatch=False):
"""
Args:
sess (tf.Session): a session object
vars_to_update: a collection of variables to update
ignore_mismatch (bool): ignore failures when the value and the
                variable do not match.
"""
self.sess = sess
self.name_map = {v.name: v for v in vars_to_update}
self.ignore_mismatch = ignore_mismatch
@staticmethod
def relaxed_value_for_var(value, var, ignore_mismatch=False):
"""
Returns a relaxed (possibly reshaped/upcast-ed) version of value,
to be loaded to the given variable.
Args:
            value (ndarray): a numpy array to be loaded to var
var (tf.Variable):
ignore_mismatch (bool): ignore failures when the value and the
                variable do not match.
Returns:
ndarray: a possibly reshaped or casted version of value.
Returns None if `ignore_mismatch==True` and the value and the variable
mismatch.
"""
assert isinstance(var, tf.Variable)
name = var.op.name
# check incompatible shape
varshape = tuple(var.get_shape().as_list())
if varshape != value.shape:
if np.prod(varshape) != np.prod(value.shape):
if ignore_mismatch:
logger.warn(
"Cannot load an array of shape {} into variable '{}' whose shape is {}.".format(
value.shape, name, varshape))
return None
else:
raise ValueError(
"Trying to load an array of shape {} into variable '{}' whose shape is {}.".format(
value.shape, name, varshape))
# TODO only allow reshape when shape different by empty axis
logger.warn("The tensor is reshaped from {} to {} when assigned to '{}'".format(
value.shape, varshape, name))
value = value.reshape(varshape)
# Be permissive, and allow some common type incompatibility problems
def allow_cast(to_type, from_type):
# to_type: a tf dtype
# from_type: a numpy dtype
from_type = tf.as_dtype(from_type)
# allow up/down casting between floating points
if from_type.is_floating and to_type.is_floating:
return True
if from_type.is_integer and to_type.is_integer:
# only allow up-casting between integers
if to_type.min <= from_type.min and to_type.max >= from_type.max:
return True
return False
if hasattr(value, 'dtype'):
vartype = var.dtype.as_numpy_dtype
if vartype != value.dtype:
msg = "Variable {} has dtype {} but was given a value of dtype {}.".format(name, var.dtype, value.dtype)
if allow_cast(var.dtype.base_dtype, value.dtype):
value = vartype(value)
logger.warn(msg + " The value will be loaded after casting!")
else:
assert vartype == value.dtype, msg
return value
def update(self, prms):
"""
Args:
prms(dict): dict of {variable name: value}
Any name in prms must be in the graph and in vars_to_update.
"""
with self.sess.as_default():
fetches = []
feeds = {}
for name, value in six.iteritems(prms):
assert name in self.name_map
var = self.name_map[name]
value = SessionUpdate.relaxed_value_for_var(
value, var, ignore_mismatch=self.ignore_mismatch)
# This is the implementation of `var.load`
if value is not None:
fetches.append(var.initializer)
feeds[var.initializer.inputs[1]] = value
self.sess.run(fetches, feed_dict=feeds)
def dump_session_params(path):
"""
Dump value of all TRAINABLE + MODEL variables to a dict, and save as
npz format (loadable by :func:`sessinit.SmartInit`).
Args:
        path(str): the file name to save the parameters. Must end with .npz.
"""
# save variables that are GLOBAL, and either TRAINABLE or MODEL
var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
var.extend(tf.get_collection(tf.GraphKeys.MODEL_VARIABLES))
# TODO dedup
assert len(set(var)) == len(var), "TRAINABLE and MODEL variables have duplication!"
gvars = {k.name for k in tf.global_variables()}
var = [v for v in var if v.name in gvars]
result = {}
for v in var:
result[v.name] = v.eval()
save_chkpt_vars(result, path)
def save_chkpt_vars(dic, path):
"""
Save variables in dic to path.
Args:
dic: {name: value}
path: save as npz if the name ends with '.npz', otherwise save as a checkpoint.
"""
logger.info("Variables to save to {}:".format(path))
keys = sorted(dic.keys())
logger.info(pprint.pformat(keys))
assert not path.endswith('.npy')
if path.endswith('.npz'):
np.savez_compressed(path, **dic)
else:
with tf.Graph().as_default(), \
tf.Session() as sess:
for k, v in six.iteritems(dic):
k = get_op_tensor_name(k)[0]
_ = tf.Variable(name=k, initial_value=v) # noqa
sess.run(tf.global_variables_initializer())
saver = tf.train.Saver()
saver.save(sess, path, write_meta_graph=False)
def get_checkpoint_path(path):
"""
Work around TF problems in checkpoint path handling.
Args:
path: a user-input path
Returns:
str: the argument that can be passed to NewCheckpointReader
"""
if os.path.basename(path) == path:
path = os.path.join('.', path) # avoid #4921 and #6142
if os.path.basename(path) == 'checkpoint':
assert tfv1.gfile.Exists(path), path
path = tf.train.latest_checkpoint(os.path.dirname(path))
# to be consistent with either v1 or v2
# fix paths if provided a wrong one
new_path = path
if '00000-of-00001' in path:
new_path = path.split('.data')[0]
elif path.endswith('.index'):
new_path = path.split('.index')[0]
if new_path != path:
logger.info(
"Checkpoint path {} is auto-corrected to {}.".format(path, new_path))
path = new_path
assert tfv1.gfile.Exists(path) or tfv1.gfile.Exists(path + '.index'), path
return path
def load_chkpt_vars(path):
""" Load all variables from a checkpoint to a dict.
Args:
path(str): path to a checkpoint.
Returns:
dict: a name:value dict
"""
path = get_checkpoint_path(path)
reader = tfv1.train.NewCheckpointReader(path)
var_names = reader.get_variable_to_shape_map().keys()
result = {}
for n in var_names:
result[n] = reader.get_tensor(n)
return result
def is_training_name(name):
"""
**Guess** if this variable is only used in training.
    Only used internally to avoid too much logging. Do not use it.
"""
# TODO: maybe simply check against TRAINABLE_VARIABLES and MODEL_VARIABLES?
# TODO or use get_slot_names()
name = get_op_tensor_name(name)[0]
if name.endswith('/Adam') or name.endswith('/Adam_1'):
return True
if name.endswith('/Momentum'):
return True
if name.endswith('/Adadelta') or name.endswith('/Adadelta_1'):
return True
if name.endswith('/RMSProp') or name.endswith('/RMSProp_1'):
return True
if name.endswith('/Adagrad'):
return True
if name.startswith('EMA/') or '/EMA/' in name: # all the moving average summaries
return True
if name.startswith('AccumGrad') or name.endswith('/AccumGrad'):
return True
if name.startswith('apply_gradients'):
return True
return False
| 9,176 | 34.296154 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
from .tower import get_current_tower_context, TowerContext
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .common import *
from .sessinit import *
from .argscope import *
# don't want to include everything from .tower
__all__ = ['get_current_tower_context', 'TowerContext']
def _global_import(name):
p = __import__(name, globals(), None, level=1)
lst = p.__all__ if '__all__' in dir(p) else dir(p)
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
_TO_IMPORT = frozenset([
'common',
'sessinit',
'argscope',
])
for module_name in _TO_IMPORT:
_global_import(module_name)
"""
TODO remove this line in the future.
Better to keep submodule names (sesscreate, varmanip, etc) out of __all__,
so that these names will be invisible under `tensorpack.` namespace.
To use these utilities, users are expected to import them explicitly, e.g.:
import tensorpack.tfutils.sessinit as sessinit
"""
__all__.extend(['sessinit', 'summary', 'optimizer',
'sesscreate', 'gradproc', 'varreplace',
'tower'])
| 1,333 | 25.68 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/optimizer.py | # -*- coding: utf-8 -*-
# File: optimizer.py
from contextlib import contextmanager
import tensorflow as tf
from ..tfutils.common import get_tf_version_tuple
from ..compat import tfv1
from ..utils.develop import HIDE_DOC
from .gradproc import FilterNoneGrad, GradientProcessor
__all__ = ['apply_grad_processors', 'ProxyOptimizer',
'PostProcessOptimizer', 'VariableAssignmentOptimizer',
'AccumGradOptimizer']
class ProxyOptimizer(tfv1.train.Optimizer):
"""
A transparent proxy which delegates all methods of :class:`tf.train.Optimizer`
"""
def __init__(self, opt, name='ProxyOptimizer'):
assert isinstance(opt, tfv1.train.Optimizer), opt
super(ProxyOptimizer, self).__init__(False, name)
self._opt = opt
@HIDE_DOC
def compute_gradients(self, *args, **kwargs):
return self._opt.compute_gradients(*args, **kwargs)
@HIDE_DOC
def get_slot(self, *args, **kwargs):
return self._opt.get_slot(*args, **kwargs)
@HIDE_DOC
def get_slot_names(self, *args, **kwargs):
return self._opt.get_slot_names(*args, **kwargs)
@HIDE_DOC
def apply_gradients(self, *args, **kwargs):
return self._opt.apply_gradients(*args, **kwargs)
def apply_grad_processors(opt, gradprocs):
"""
Wrapper around optimizers to apply gradient processors.
Args:
opt (tf.train.Optimizer):
gradprocs (list[GradientProcessor]): gradient processors to add to the
optimizer.
Returns:
a :class:`tf.train.Optimizer` instance which runs the gradient
processors before updating the variables.
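    Example:
    A sketch using one of tensorpack's gradient processors
    (:class:`GlobalNormClip` from ``tensorpack.tfutils.gradproc``):
    .. code-block:: python
        from tensorpack.tfutils.gradproc import GlobalNormClip
        opt = tf.train.AdamOptimizer(1e-3)
        opt = apply_grad_processors(opt, [GlobalNormClip(5)])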
"""
assert isinstance(gradprocs, (list, tuple)), gradprocs
for gp in gradprocs:
assert isinstance(gp, GradientProcessor), gp
class _ApplyGradientProcessor(ProxyOptimizer):
def __init__(self, opt, gradprocs):
self._gradprocs = gradprocs[:]
super(_ApplyGradientProcessor, self).__init__(opt)
def apply_gradients(self, grads_and_vars,
global_step=None, name=None):
g = self._apply(grads_and_vars)
return self._opt.apply_gradients(g, global_step, name)
def _apply(self, g):
for proc in self._gradprocs:
g = proc.process(g)
return g
return _ApplyGradientProcessor(opt, gradprocs)
class PostProcessOptimizer(ProxyOptimizer):
"""
An optimizer which applies some "post-processing operation" per variable
(e.g. clipping, quantization) after the gradient update.
"""
def __init__(self, opt, func, colocate=True):
"""
Args:
opt (tf.train.Optimizer):
func (tf.Variable -> tf.Operation or None): the operation needed
to perform for this variable after the gradient update.
colocate (boolean): colocate the function with the variable. No effect since TF 1.13.
"""
super(PostProcessOptimizer, self).__init__(opt)
self._func = func
self._colocate = colocate
@HIDE_DOC
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
update_op = super(PostProcessOptimizer, self).apply_gradients(
grads_and_vars, global_step)
ops = []
with tf.control_dependencies([update_op]):
for _, var in grads_and_vars:
with self._maybe_colocate(var):
op = self._func(var)
if op is not None:
assert isinstance(op, tf.Operation), op
ops.append(op)
update_op = tf.group(update_op, *ops, name=name)
return update_op
@contextmanager
def _maybe_colocate(self, var):
G = tf.get_default_graph()
if self._colocate and get_tf_version_tuple() <= (1, 12):
with G.colocate_with(var):
yield
else:
yield
class VariableAssignmentOptimizer(PostProcessOptimizer):
"""
An optimizer which assigns each variable a new value (e.g. clipping,
quantization) after the gradient update.
"""
def __init__(self, opt, func):
"""
Args:
opt (tf.train.Optimizer):
func (tf.Variable -> tf.Tensor or None): the new value to be
assigned to this variable after the gradient update.
"""
def f(v):
t = func(v)
if t is None:
return t
return tf.assign(v, t, use_locking=False).op
super(VariableAssignmentOptimizer, self).__init__(opt, f)
class AccumGradOptimizer(ProxyOptimizer):
"""
An optimizer which accumulates gradients across :math:`k` :meth:`minimize` executions,
and apply them together in every :math:`k` th :meth:`minimize` execution.
This is roughly the same as using a :math:`k` times larger batch size plus a
:math:`k` times larger learning rate, but uses much less memory.
This optimizer can be used in any TensorFlow code (with or without tensorpack).
Example:
.. code-block:: python
from tensorpack.tfutils.optimizer import AccumGradOptimizer
myopt = tf.train.GradientDescentOptimizer(0.01)
myopt = AccumGradOptimizer(myopt, niter=5)
train_op = myopt.minimize(loss)
"""
def __init__(self, opt, niter):
"""
Args:
opt (tf.train.Optimizer): the underlying sub-optimizer.
niter (int): number of iterations to accumulate gradients.
"""
super(AccumGradOptimizer, self).__init__(opt, 'AccumGrad')
self._niter = int(niter)
def _create_accum_slots(self, var_list):
slots = []
for v in var_list:
# TODO an option to not colocate the accumulators with variables (to save more memory)
s = self._zeros_slot(v, "accum", self._name)
slots.append(s)
return slots
@HIDE_DOC
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
grads_and_vars = FilterNoneGrad().process(grads_and_vars)
vs = []
for g, v in grads_and_vars:
assert isinstance(g, (tf.Tensor, tf.IndexedSlices)) and isinstance(v, tf.Variable), \
"AccumGradOptimizer does not work for the gradient of {}! " \
"Types of v and g are {} and {}".format(v.op.name, type(v), type(g))
vs.append(v)
with tf.control_dependencies(None):
slots = self._create_accum_slots(vs)
slots_and_vars = [(s, gv[1]) for s, gv in zip(slots, grads_and_vars)]
with tf.variable_scope(self._name), tf.device('/cpu:0'):
counter = tf.Variable(
0, name="counter", trainable=False, dtype=tf.int32)
with tf.name_scope('AccumGradOptimizer'):
ops = []
for s, gv in zip(slots, grads_and_vars):
g, v = gv
ops.append(s.assign_add(g))
update_counter = tf.assign_add(counter, 1, name='update_counter')
update_slot_op = tf.group(update_counter, *ops, name='update_slot')
def update_grad():
update_op = self._opt.apply_gradients(slots_and_vars)
with tf.control_dependencies([update_op]):
clear_ops = [tf.assign(s, tf.zeros_like(s)) for s in slots]
return tf.group(*clear_ops, name='update_grad')
pred = tf.equal(tf.mod(counter, self._niter), 0)
with tf.control_dependencies([update_slot_op]):
if name is None:
name = 'cond_update_grad'
op = tf.cond(pred, update_grad, tf.no_op)
if global_step is not None:
# Tensorpack maintains global_step by other means,
# so this option is useless in tensorpack trainers.
# But we include the implementation here for completeness
global_step_increment = tf.assign_add(global_step, 1)
op = tf.group(op, global_step_increment, name=name)
else:
op = tf.identity(op, name=name).op
return op
if __name__ == '__main__':
# run it with "python -m tensorpack.tfutils.optimizer"
x = tf.get_variable('x', shape=[6])
cost = tf.reduce_sum(tf.abs(x), name='cost')
opt = tf.train.GradientDescentOptimizer(0.01)
opt = AccumGradOptimizer(opt, 5)
min_op = opt.minimize(cost, global_step=tf.train.get_or_create_global_step())
sess = tf.Session()
sess.run(tf.global_variables_initializer())
with sess.as_default():
for _ in range(20):
min_op.run()
print(x.eval())
print(tf.train.get_or_create_global_step().eval())
| 8,777 | 35.123457 | 98 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/summary.py | # -*- coding: utf-8 -*-
# File: summary.py
import re
from contextlib import contextmanager
import six
from six.moves import range
from tensorflow.python.training import moving_averages
from ..compat import tfv1 as tf
from ..utils import logger
from ..utils.argtools import graph_memoized
from ..utils.naming import MOVING_SUMMARY_OPS_KEY
from .scope_utils import cached_name_scope
from .symbolic_functions import rms
from .tower import get_current_tower_context
__all__ = ['add_tensor_summary', 'add_param_summary',
'add_activation_summary', 'add_moving_summary',
]
# some scope stuff to use internally...
@graph_memoized
def _get_cached_vs(name):
with tf.variable_scope(name) as scope:
return scope
@contextmanager
def _enter_vs_reuse_ns(name):
vs = _get_cached_vs(name)
    # XXX Not good to enter the cached vs directly, because this will clean up the custom getter
# with tf.variable_scope(name, reuse=tf.AUTO_REUSE): # available in 1.4 only
with tf.variable_scope(vs):
with tf.name_scope(vs.original_name_scope):
yield vs
def create_scalar_summary(name, v):
"""
Args:
name (str):
v (float): scalar value
Returns:
tf.Summary: a tf.Summary object with name and simple scalar value v.
"""
assert isinstance(name, six.string_types), type(name)
v = float(v)
s = tf.Summary()
s.value.add(tag=name, simple_value=v)
return s
def create_image_summary(name, val):
"""
Args:
name(str):
val(np.ndarray): 4D tensor of NHWC. assume RGB if C==3.
Can be either float or uint8. Range has to be [0,255].
Returns:
tf.Summary:
"""
assert isinstance(name, six.string_types), type(name)
n, h, w, c = val.shape
val = val.astype('uint8')
s = tf.Summary()
imparams = [cv2.IMWRITE_PNG_COMPRESSION, 9]
for k in range(n):
arr = val[k]
        # CV2 will only write correctly in BGR channel order
if c == 3:
arr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
elif c == 4:
arr = cv2.cvtColor(arr, cv2.COLOR_RGBA2BGRA)
tag = name if n == 1 else '{}/{}'.format(name, k)
retval, img_str = cv2.imencode('.png', arr, imparams)
if not retval:
# Encoding has failed.
continue
img_str = img_str.tostring()
img = tf.Summary.Image()
img.height = h
img.width = w
# 1 - grayscale 3 - RGB 4 - RGBA
img.colorspace = c
img.encoded_image_string = img_str
s.value.add(tag=tag, image=img)
return s
def add_tensor_summary(x, types, name=None, collections=None,
main_tower_only=True):
"""
Summarize a tensor by different methods.
Args:
x (tf.Tensor): a tensor to summarize
types (list[str]): summary types, can be scalar/histogram/sparsity/mean/rms
name (str): summary name. Defaults to be the op name.
collections (list[str]): collections of the summary ops.
main_tower_only (bool): Only run under main training tower. If
set to True, calling this function under other TowerContext
has no effect.
Example:
.. code-block:: python
with tf.name_scope('mysummaries'): # to not mess up tensorboard
            add_tensor_summary(
                tensor, ['histogram', 'rms', 'sparsity'], name='mytensor')
"""
types = set(types)
if name is None:
name = x.op.name
ctx = get_current_tower_context()
if main_tower_only and ctx is not None and not ctx.is_main_training_tower:
return
SUMMARY_TYPES_DIC = {
'scalar': lambda: tf.summary.scalar(name + '-summary', x, collections=collections),
'histogram': lambda: tf.summary.histogram(name + '-histogram', x, collections=collections),
'sparsity': lambda: tf.summary.scalar(
name + '-sparsity', tf.nn.zero_fraction(x),
collections=collections),
'mean': lambda: tf.summary.scalar(
name + '-mean', tf.reduce_mean(x),
collections=collections),
'rms': lambda: tf.summary.scalar(
name + '-rms', rms(x), collections=collections)
}
for typ in types:
SUMMARY_TYPES_DIC[typ]()
def add_activation_summary(x, types=None, name=None, collections=None):
"""
Call :func:`add_tensor_summary` under a reused 'activation-summary' name scope.
This function is a no-op if not calling from main training tower.
Args:
        x (tf.Tensor): the tensor to summarize.
        types (list[str]): summary types, defaults to ``['sparsity', 'rms', 'histogram']``.
        name (str): if it is None, use x.name.
collections (list[str]): collections of the summary ops.
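    Example (a minimal sketch; ``features`` stands for any intermediate tensor
    produced inside your tower function):
    .. code-block:: python
        activations = tf.nn.relu(features, name='relu1')
        add_activation_summary(activations)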
"""
ndim = x.get_shape().ndims
if ndim < 2:
logger.warn("Cannot summarize scalar activation {}".format(x.name))
return
if types is None:
types = ['sparsity', 'rms', 'histogram']
with cached_name_scope('activation-summary'):
add_tensor_summary(x, types, name=name, collections=collections)
def add_param_summary(*summary_lists, **kwargs):
"""
Add summary ops for all trainable variables matching the regex, under a
reused 'param-summary' name scope.
This function is a no-op if not calling from main training tower.
Args:
summary_lists (list): each is (regex, [list of summary type]).
Summary type is defined in :func:`add_tensor_summary`.
collections (list[str]): collections of the summary ops.
Example:
.. code-block:: python
add_param_summary(
('.*/W', ['histogram', 'rms']),
('.*/gamma', ['scalar']),
)
"""
collections = kwargs.pop('collections', None)
assert len(kwargs) == 0, "Unknown kwargs: " + str(kwargs)
ctx = get_current_tower_context()
if ctx is not None and not ctx.is_main_training_tower:
return
params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
with cached_name_scope('param-summary'):
for p in params:
name = p.op.name
for rgx, actions in summary_lists:
if not rgx.endswith('$'):
rgx = rgx + '$'
if re.match(rgx, name):
add_tensor_summary(p, actions, name=name, collections=collections)
def add_moving_summary(*args, **kwargs):
"""
Summarize the moving average for scalar tensors.
This function is a no-op if not calling from main training tower.
See tutorial at https://tensorpack.readthedocs.io/tutorial/summary.html
Args:
args: scalar tensors to summarize
decay (float): the decay rate. Defaults to 0.95.
collection (str or None): the name of the collection to add EMA-maintaining ops.
The default will work together with the default
:class:`MovingAverageSummary` callback.
summary_collections ([str]): the names of collections to add the
summary op. Default is TF's default (`tf.GraphKeys.SUMMARIES`).
Returns:
[tf.Tensor]:
list of tensors returned by assign_moving_average,
which can be used to maintain the EMA.
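    Example (a minimal sketch; ``total_cost`` and ``accuracy`` stand for scalar
    tensors already defined in your tower function):
    .. code-block:: python
        add_moving_summary(total_cost, accuracy)
        add_moving_summary(total_cost, decay=0.99)  # a slower-moving average than the default 0.95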
"""
decay = kwargs.pop('decay', 0.95)
coll = kwargs.pop('collection', MOVING_SUMMARY_OPS_KEY)
summ_coll = kwargs.pop('summary_collections', None)
assert len(kwargs) == 0, "Unknown arguments: " + str(kwargs)
ctx = get_current_tower_context()
# allow ctx to be none
if ctx is not None and not ctx.is_main_training_tower:
return []
graph = tf.get_default_graph()
try:
control_flow_ctx = graph._get_control_flow_context()
# XLA does not support summaries anyway
# However, this function will generate unnecessary dependency edges,
# which makes the tower function harder to compile under XLA, so we skip it
if control_flow_ctx is not None and control_flow_ctx.IsXLAContext():
            return []
except Exception:
pass
if tf.get_variable_scope().reuse is True:
logger.warn("add_moving_summary() called under reuse=True scope, ignored.")
return []
for x in args:
assert isinstance(x, (tf.Tensor, tf.Variable)), x
assert x.get_shape().ndims == 0, \
"add_moving_summary() only accepts scalar tensor! Got one with {}".format(x.get_shape())
ema_ops = []
for c in args:
name = re.sub('tower[0-9]+/', '', c.op.name)
with tf.name_scope(None):
if not c.dtype.is_floating:
c = tf.cast(c, tf.float32)
# assign_moving_average creates variables with op names, therefore clear ns first.
with _enter_vs_reuse_ns('EMA') as vs:
ema_var = tf.get_variable(name, shape=c.shape, dtype=c.dtype,
initializer=tf.constant_initializer(),
trainable=False)
ns = vs.original_name_scope
with tf.name_scope(ns): # reuse VS&NS so that EMA_1 won't appear
ema_op = moving_averages.assign_moving_average(
ema_var, c, decay,
zero_debias=True, name=name + '_EMA_apply')
ema_ops.append(ema_op)
with tf.name_scope(None):
tf.summary.scalar(
name + '-summary', ema_op,
collections=summ_coll) # write the EMA value as a summary
if coll is not None:
for op in ema_ops:
tf.add_to_collection(coll, op)
return ema_ops
try:
import cv2
except ImportError:
from ..utils.develop import create_dummy_func
create_image_summary = create_dummy_func('create_image_summary', 'cv2') # noqa
| 9,830 | 34.110714 | 100 | py |
SyNet | SyNet-master/tensorpack/tensorpack/tfutils/argscope.py | # -*- coding: utf-8 -*-
# File: argscope.py
import copy
from collections import defaultdict
from contextlib import contextmanager
from functools import wraps
from inspect import getmembers, isfunction
import tensorflow as tf
from ..compat import is_tfv2
from ..utils import logger
from .model_utils import get_shape_str
from .tower import get_current_tower_context
__all__ = ['argscope', 'get_arg_scope', 'enable_argscope_for_module',
'enable_argscope_for_function']
_ArgScopeStack = []
@contextmanager
def argscope(layers, **kwargs):
"""
Args:
layers (list or layer): layer or list of layers to apply the arguments.
Returns:
a context where all appearance of these layer will by default have the
arguments specified by kwargs.
Example:
.. code-block:: python
with argscope(Conv2D, kernel_shape=3, nl=tf.nn.relu, out_channel=32):
x = Conv2D('conv0', x)
x = Conv2D('conv1', x)
x = Conv2D('conv2', x, out_channel=64) # override argscope
"""
if not isinstance(layers, list):
layers = [layers]
for l in layers:
assert hasattr(l, '__argscope_enabled__'), "Argscope not supported for {}".format(l)
# need to deepcopy so that changes to new_scope does not affect outer scope
new_scope = copy.deepcopy(get_arg_scope())
for l in layers:
new_scope[l.__name__].update(kwargs)
_ArgScopeStack.append(new_scope)
yield
del _ArgScopeStack[-1]
def get_arg_scope():
"""
Returns:
dict: the current argscope.
An argscope is a dict of dict: ``dict[layername] = {arg: val}``
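    Example (a minimal sketch of inspecting the current scope; ``Conv2D`` is
    used only as an example of a layer that supports argscope):
    .. code-block:: python
        with argscope(Conv2D, activation=tf.nn.relu):
            assert get_arg_scope()['Conv2D']['activation'] is tf.nn.relu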
"""
if len(_ArgScopeStack) > 0:
return _ArgScopeStack[-1]
else:
return defaultdict(dict)
def enable_argscope_for_function(func, log_shape=True):
"""Decorator for function to support argscope
Example:
.. code-block:: python
from mylib import myfunc
myfunc = enable_argscope_for_function(myfunc)
Args:
func: A function mapping one or multiple tensors to one or multiple
tensors.
        log_shape (bool): Specify whether the shapes of the first input and
            output tensors should be logged once.
Remarks:
If the function ``func`` returns multiple input or output tensors,
only the first input/output tensor shape is displayed during logging.
Returns:
The decorated function.
"""
assert callable(func), "func should be a callable"
@wraps(func)
def wrapped_func(*args, **kwargs):
actual_args = copy.copy(get_arg_scope()[func.__name__])
actual_args.update(kwargs)
out_tensor = func(*args, **actual_args)
in_tensor = args[0]
ctx = get_current_tower_context()
name = func.__name__ if 'name' not in kwargs else kwargs['name']
if log_shape:
if ('tower' not in ctx.ns_name.lower()) or ctx.is_main_training_tower:
# we assume the first parameter is the most interesting
if isinstance(out_tensor, tuple):
out_tensor_descr = out_tensor[0]
else:
out_tensor_descr = out_tensor
logger.info("{:<12}: {} --> {}".format(
"'" + name + "'",
get_shape_str(in_tensor),
get_shape_str(out_tensor_descr)))
return out_tensor
wrapped_func.__argscope_enabled__ = True
return wrapped_func
def enable_argscope_for_module(module, log_shape=True):
"""
Overwrite all functions of a given module to support argscope.
Note that this function monkey-patches the module and therefore could
have unexpected consequences.
It has been only tested to work well with ``tf.layers`` module.
Example:
.. code-block:: python
import tensorflow as tf
enable_argscope_for_module(tf.layers)
Args:
log_shape (bool): print input/output shapes of each function.
"""
if is_tfv2() and module == tf.layers:
module = tf.compat.v1.layers
for name, obj in getmembers(module):
if isfunction(obj):
setattr(module, name, enable_argscope_for_function(obj,
log_shape=log_shape))
| 4,320 | 28.8 | 92 | py |
SyNet | SyNet-master/tensorpack/tensorpack/train/base.py | # -*- coding: utf-8 -*-
# File: base.py
import copy
import time
import weakref
import tensorflow as tf
from ..compat import tfv1
from ..callbacks import Callback, Callbacks, Monitors, MonitorBase
from ..callbacks.steps import MaintainStepCounter
from ..tfutils import get_global_step_value
from ..tfutils.model_utils import describe_trainable_vars
from ..tfutils.sesscreate import NewSessionCreator, ReuseSessionCreator
from ..tfutils.sessinit import JustCurrentSession, SessionInit
from ..utils import logger
from ..utils.argtools import call_only_once
from ..utils.utils import humanize_time_delta
from .config import DEFAULT_CALLBACKS, DEFAULT_MONITORS, TrainConfig
__all__ = ['StopTraining', 'Trainer']
class StopTraining(Exception):
"""
An exception thrown to stop training.
"""
pass
class TrainLoop(object):
"""
Manage the double for loop.
"""
def __init__(self):
self._epoch_num = 0
self._global_step = 0
self._local_step = -1
def config(self, steps_per_epoch, starting_epoch, max_epoch):
"""
Configure the loop given the settings.
"""
self.starting_epoch = int(starting_epoch)
self.max_epoch = int(max_epoch)
self.steps_per_epoch = int(steps_per_epoch)
# Allow empty epoch (no steps), if we want to run the callbacks only.
assert self.steps_per_epoch >= 0 and self.max_epoch >= 0
self._epoch_num = starting_epoch - 1
def update_global_step(self):
"""
Update the Python-side global_step from TF.
This must be called under initialized default session.
"""
self._global_step = get_global_step_value()
@property
def epoch_num(self):
"""
The number of the currently ongoing epoch.
An epoch is defined to cover the moment before calling `before_epoch` until after calling `trigger_epoch`.
i.e., in the `trigger_epoch` of epoch 3, `self.epoch_num` is 3.
        If you need to use `self.epoch_num` in your callback, you'll need to know this.
"""
return self._epoch_num
@property
def global_step(self):
"""
The tensorflow global_step, i.e. how many times ``hooked_sess.run`` has been called.
Note:
1. global_step is incremented **after** each ``hooked_sess.run`` returns from TF runtime.
2. If you make zero or more than one calls to ``hooked_sess.run`` in one
:meth:`run_step`, local_step and global_step may increment at different speed.
"""
return self._global_step
@property
def local_step(self):
"""
The number of steps that have finished in the current epoch.
"""
return self._local_step
class Trainer(object):
""" Base class for a trainer.
"""
is_chief = True
"""
Whether this process is the chief worker in distributed training.
Certain callbacks will only be run by chief worker.
"""
sess = None
"""
The ``tf.Session`` object the trainer is using.
Available after :meth:`initialize()`.
Using ``trainer.sess.run`` to evaluate tensors that depend on the training
``InputSource`` may have unexpected effect:
For example, if you use ``trainer.sess.run`` to evaluate a tensor that depends on the
inputs coming from a ``StagingArea``,
it will take a datapoint from the ``StagingArea``, making the ``StagingArea`` empty, and as a result
make the training hang.
"""
hooked_sess = None
"""
The ``tf.train.MonitoredSession`` object the trainer is using.
It contains all the ``before_run/after_run`` hooks the callbacks have registered.
It is used for running the training iterations.
Available after :meth:`initialize()`.
Note that using ``hooked_sess.run`` will evaluate all the hooks,
just like running a training iteration. It may do the following:
1. Take a datapoint from the InputSource
2. Increase the global_step
3. Evaluate some summaries
Typically you **should not** use ``hooked_sess.run`` in callbacks,
because it is for the "training iteration". If you just want to evaluate
    some tensors, use ``sess.run`` if the tensors do not depend on the inputs,
or more generally, use `before_run/after_run` to evaluate the tensors **along with**
the training iterations.
"""
def __init__(self):
self._callbacks = []
self.loop = TrainLoop()
def _register_callback(self, cb):
"""
Register callbacks to the trainer.
It can only be called before :meth:`Trainer.train()`.
Args:
cb (Callback or [Callback]): a callback or a list of callbacks
Returns:
succeed or not
"""
if isinstance(cb, (list, tuple)):
for x in cb:
self._register_callback(x)
return
assert isinstance(cb, Callback), cb
assert not isinstance(self._callbacks, Callbacks), \
"Cannot register more callbacks after trainer was setup!"
if not self.is_chief and cb.chief_only:
logger.warn("Callback {} is chief-only, skipped.".format(str(cb)))
return False
else:
self._callbacks.append(cb)
return True
register_callback = _register_callback
def run_step(self):
"""
Defines what to do in one iteration. The default is:
``self.hooked_sess.run(self.train_op)``.
The behavior of each iteration can be changed by either setting ``trainer.train_op``,
or overriding this method.
"""
if not hasattr(self, 'train_op'):
raise NotImplementedError(
"Please either set `Trainer.train_op` or provide an implementation "
"of Trainer.run_step()!")
self.hooked_sess.run(self.train_op)
@call_only_once
def setup_callbacks(self, callbacks, monitors):
"""
Setup callbacks and monitors. Must be called after the main graph is built.
Args:
callbacks ([Callback]):
monitors ([MonitorBase]):
"""
assert isinstance(callbacks, list), callbacks
assert isinstance(monitors, list), monitors
describe_trainable_vars() # TODO weird
self.register_callback(MaintainStepCounter())
for cb in callbacks:
self.register_callback(cb)
for cb in self._callbacks:
assert not isinstance(cb, MonitorBase), "Monitor cannot be pre-registered for now!"
registered_monitors = []
for m in monitors:
if self.register_callback(m):
registered_monitors.append(m)
self.monitors = Monitors(registered_monitors)
self.register_callback(self.monitors) # monitors is also a callback
# some final operations that might modify the graph
logger.info("Setup callbacks graph ...")
self._callbacks = Callbacks(self._callbacks)
self._callbacks.setup_graph(weakref.proxy(self))
@call_only_once
def initialize(self, session_creator, session_init):
"""
Create the session and set `self.sess`.
        Call `self.initialize_hooks()`
Finalize the graph.
It must be called after callbacks are setup.
Args:
session_creator (tf.train.SessionCreator):
session_init (sessinit.SessionInit):
"""
assert isinstance(session_creator, tfv1.train.SessionCreator), session_creator
assert isinstance(session_init, SessionInit), session_init
session_init._setup_graph()
logger.info("Creating the session ...")
self.sess = session_creator.create_session()
self.initialize_hooks()
if self.is_chief:
logger.info("Initializing the session ...")
session_init._run_init(self.sess)
else:
if not isinstance(session_init, JustCurrentSession):
logger.warn("This is not a chief worker, 'session_init' was ignored!")
self.sess.graph.finalize()
logger.info("Graph Finalized.")
@call_only_once
def initialize_hooks(self):
"""
Create SessionRunHooks for all callbacks, and hook it onto `self.sess` to create `self.hooked_sess`.
A new trainer may override this method to create multiple groups of hooks,
which can be useful when the training is not done by a single `train_op`.
"""
hooks = self._callbacks.get_hooks()
self.hooked_sess = tfv1.train.MonitoredSession(
session_creator=ReuseSessionCreator(self.sess), hooks=hooks)
@call_only_once
def main_loop(self, steps_per_epoch, starting_epoch, max_epoch):
"""
Run the main training loop.
Args:
steps_per_epoch, starting_epoch, max_epoch (int):
"""
with self.sess.as_default():
self.loop.config(steps_per_epoch, starting_epoch, max_epoch)
self.loop.update_global_step()
try:
self._callbacks.before_train()
# refresh global step (might have changed by callbacks) TODO ugly
# what if gs is changed later?
self.loop.update_global_step()
for self.loop._epoch_num in range(
self.loop.starting_epoch, self.loop.max_epoch + 1):
logger.info("Start Epoch {} ...".format(self.loop.epoch_num))
self._callbacks.before_epoch()
start_time = time.time()
for self.loop._local_step in range(self.loop.steps_per_epoch):
if self.hooked_sess.should_stop():
return
self.run_step() # implemented by subclass
self._callbacks.trigger_step()
self._callbacks.after_epoch()
logger.info("Epoch {} (global_step {}) finished, time:{}.".format(
self.loop.epoch_num, self.loop.global_step, humanize_time_delta(time.time() - start_time)))
# trigger epoch outside the timing region.
self._callbacks.trigger_epoch()
logger.info("Training has finished!")
except (StopTraining, tf.errors.OutOfRangeError) as e:
logger.info("Training was stopped by exception {}.".format(str(e)))
except KeyboardInterrupt:
logger.info("Detected Ctrl-C and exiting main loop.")
raise
finally:
self._callbacks.after_train()
self.hooked_sess.close()
def train(self,
callbacks, monitors,
session_creator, session_init,
steps_per_epoch, starting_epoch=1, max_epoch=9999999):
"""
Implemented by three lines:
.. code-block:: python
self.setup_callbacks(callbacks, monitors)
self.initialize(session_creator, session_init)
self.main_loop(steps_per_epoch, starting_epoch, max_epoch)
You can call those methods by yourself to have better control on details if needed.
"""
self.setup_callbacks(callbacks, monitors)
self.initialize(session_creator, session_init)
self.main_loop(steps_per_epoch, starting_epoch, max_epoch)
def train_with_defaults(
self, _sentinel=None,
callbacks=None, monitors=None,
session_creator=None, session_init=None,
steps_per_epoch=None, starting_epoch=1, max_epoch=9999999,
extra_callbacks=None):
"""
Same as :meth:`train()`, except:
1. Add `extra_callbacks` to callbacks. The default value for
`extra_callbacks` is :meth:`DEFAULT_CALLBACKS()`.
2. Default value for `monitors` is :meth:`DEFAULT_MONITORS()`.
3. Provide default values for every option except `steps_per_epoch`.
"""
assert _sentinel is None, "Please call `train_with_defaults` with keyword arguments only!"
callbacks = copy.copy(callbacks or [])
monitors = DEFAULT_MONITORS() if monitors is None else monitors
extra_callbacks = DEFAULT_CALLBACKS() if extra_callbacks is None else extra_callbacks
callbacks.extend(extra_callbacks)
assert steps_per_epoch is not None
session_creator = session_creator or NewSessionCreator()
session_init = session_init or JustCurrentSession()
self.train(callbacks, monitors,
session_creator, session_init,
steps_per_epoch, starting_epoch, max_epoch)
def __new__(cls, *args, **kwargs):
if (len(args) > 0 and isinstance(args[0], TrainConfig)) \
or 'config' in kwargs:
logger.error("You're calling new trainers with old trainer API!")
logger.error("See https://github.com/tensorpack/tensorpack/issues/458 for more information.")
import sys
sys.exit(1)
else:
return super(Trainer, cls).__new__(cls)
def _get_property(name):
"""
Delegate property to self.loop
"""
ret = property(
lambda self: getattr(self.loop, name))
try:
ret.__doc__ = getattr(TrainLoop, name).__doc__
except AttributeError:
pass
return ret
for name in ['global_step', 'local_step', 'steps_per_epoch',
'epoch_num', 'starting_epoch', 'max_epoch']:
setattr(Trainer, name, _get_property(name))
| 13,608 | 35.681941 | 115 | py |
SyNet | SyNet-master/tensorpack/tensorpack/train/utility.py | # -*- coding: utf-8 -*-
# File: utility.py
# for backwards-compatibility
from ..graph_builder.utils import LeastLoadedDeviceSetter, OverrideToLocalVariable, override_to_local_variable # noqa
| 193 | 31.333333 | 118 | py |
SyNet | SyNet-master/tensorpack/tensorpack/train/tower.py | # -*- coding: utf-8 -*-
# File: tower.py
from abc import ABCMeta, abstractmethod
import six
import tensorflow as tf
from ..compat import tfv1, is_tfv2
from ..input_source import PlaceholderInput
from ..predict.base import OnlinePredictor
from ..tfutils.gradproc import FilterNoneGrad
from ..tfutils.tower import PredictTowerContext, TowerFunc, get_current_tower_context
from ..utils import logger
from ..utils.argtools import call_only_once, memoized
from ..utils.develop import HIDE_DOC
from .base import Trainer
__all__ = ['SingleCostTrainer', 'TowerTrainer']
class TowerTrainer(Trainer):
"""
    Base trainer for models that can be built by calling a tower function under a :class:`TowerContext`.
The assumption of tower function is required by some features that replicates the model
automatically. For example, TowerTrainer can create a predictor for you automatically,
by calling the tower function.
To use :class:`TowerTrainer`, set `tower_func` and use it to build the graph.
Note that `tower_func` can only be set once per instance of `TowerTrainer`.
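    Example (a minimal sketch of the expected usage; ``MyTowerTrainer``, ``my_tower_fn``
    and the tensor names below are placeholders for your own code):
    .. code-block:: python
        trainer = MyTowerTrainer()   # any concrete TowerTrainer subclass
        trainer.tower_func = TowerFunc(
            my_tower_fn,
            [tf.TensorSpec([None, 224, 224, 3], tf.float32, 'image')])
        # after the graph is built, e.g. inside a callback:
        predictor = trainer.get_predictor(['image'], ['prob'])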
"""
_tower_func = None
_predictors = []
"""
List of OnlinePredictor ever created for this trainer.
It is maintained for internal use.
"""
@call_only_once
def _set_tower_func(self, tower_func):
assert isinstance(tower_func, TowerFunc), tower_func
self._tower_func = tower_func
@property
def tower_func(self):
"""
A :class:`TowerFunc` instance.
See `tutorial on tower function
<http://tensorpack.readthedocs.io/tutorial/trainer.html#tower-trainer>`_
for more information.
"""
return self._tower_func
@tower_func.setter
def tower_func(self, val):
self._set_tower_func(val)
@property
def input_signature(self):
"""
list[tf.TensorSpec]: metainfo about the inputs to the tower.
"""
return self.tower_func.input_signature
@property
def towers(self):
"""
TowerTensorHandles: used to access the tower handles by either indices or names.
        This property is accessible only after the graph is set up.
With :meth:`towers`, you can then access many attributes of each tower:
Example:
.. code-block:: python
# Access the conv1/output tensor in the first training tower
trainer.towers.training()[0].get_tensor('conv1/output')
"""
return self.tower_func.towers
def get_predictor(self, input_names, output_names, device=0):
"""
This method will build the trainer's tower function under ``TowerContext(is_training=False)``,
and returns a callable predictor with input placeholders & output tensors in this tower.
        This method handles the common case where you run inference with the same tower function
        you provide to the trainer.
If you want to do inference with a different tower function, you can always build the tower by yourself,
under a "reuse" variable scope and a `TowerContext(is_training=False)`.
Args:
input_names (list): list of input names, matching the inputs declared for the trainer.
output_names(list): list of tensor names without the tower prefix.
device (int): build the predictor on device '/gpu:{device}' or use -1 for '/cpu:0'.
Returns:
an :class:`OnlinePredictor`.
Example:
.. code-block:: none
# in the graph:
interesting_tensor = tf.identity(x, name='fun')
# in _setup_graph callback method:
self._predictor = self.trainer.get_predictor(['input1', 'input2'], ['fun'])
# After session is initialized (see Tutorials - Write a Callback), can use it by:
outputs = self._predictor(input1, input2)
The CycleGAN example and DQN example have more concrete use of this method.
"""
assert self.tower_func is not None, "Must set tower_func on the trainer to use get_predictor()!"
tower_name = 'tower-pred-{}'.format(device) if device >= 0 else 'tower-pred-cpu'
device_id = device
device = '/gpu:{}'.format(device_id) if device_id >= 0 else '/cpu:0'
try:
tower = self.tower_func.towers[tower_name]
assert tower is not None, "This is a bug!"
except KeyError:
tower = None
if tower is None:
input = PlaceholderInput()
input.setup(self.input_signature)
vs_name = self._vs_name_for_predictor(device_id)
with tfv1.variable_scope(tfv1.get_variable_scope(), reuse=True), \
tf.device(device), PredictTowerContext(
tower_name, vs_name=vs_name):
logger.info("Building graph for predict tower '{}' on device {} {}...".format(
tower_name, device,
"with variable scope '{}'".format(vs_name) if vs_name else ''))
self.tower_func(*input.get_input_tensors())
tower = self.tower_func.towers[tower_name]
input_tensors = tower.get_tensors(input_names)
output_tensors = tower.get_tensors(output_names)
predictor = OnlinePredictor(input_tensors, output_tensors)
self._predictors.append(predictor)
return predictor
@HIDE_DOC
@call_only_once
def initialize(self, session_creator, session_init):
super(TowerTrainer, self).initialize(session_creator, session_init)
# Predictors are created before creating the session, so they don't have an associated session.
for pred in self._predictors:
pred.sess = self.sess
def _vs_name_for_predictor(self, device):
towers = self.towers.training()
available_ids = list(range(len(towers)))
if device in available_ids:
return towers[device].vs_name
else:
return towers[0].vs_name
@six.add_metaclass(ABCMeta)
class SingleCostTrainer(TowerTrainer):
"""
Base class for single-cost trainer.
Single-cost trainer has a :meth:`setup_graph` method which takes
(input_signature, input, get_cost_fn, get_opt_fn), and build the training graph from them.
To use a :class:`SingleCostTrainer` object, call `trainer.setup_graph(...); trainer.train(...)`.
"""
COLOCATE_GRADIENTS_WITH_OPS = True
"""
    See `tf.gradients`. It can sometimes heavily affect performance when the backward op does
    not support the device of the forward op.
"""
GATE_GRADIENTS = False
"""See `tf.gradients`. """
AGGREGATION_METHOD = tf.AggregationMethod.DEFAULT
"""See `tf.gradients`. """
XLA_COMPILE = False
""" Use :func:`xla.compile` to compile the tower function.
Note that XLA has very strong requirements on the tower function, e.g.:
1. limited op support
2. inferrable shape
3. no summary support
and many tower functions cannot be compiled by XLA.
Don't use it if you don't understand it.
"""
@call_only_once
def setup_graph(self, input_signature, input, get_cost_fn, get_opt_fn):
"""
Responsible for building the main training graph for single-cost training.
Args:
input_signature ([TensorSpec]): list of TensorSpec that describe the inputs
input (InputSource): an InputSource which has to match the input signature
get_cost_fn ([tf.Tensor] -> tf.Tensor): callable, takes some input tensors and return a cost tensor.
get_opt_fn (-> tf.train.Optimizer): callable which returns an
optimizer. Will only be called once.
Note:
`get_cost_fn` will be part of the tower function.
            It must follow the `rules of tower function.
<http://tensorpack.readthedocs.io/tutorial/trainer.html#tower-trainer>`_.
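        Example (a minimal sketch; ``my_signature``, ``my_dataflow`` and ``my_cost_fn``
        are placeholders for your own model code):
        .. code-block:: python
            trainer = SimpleTrainer()
            trainer.setup_graph(
                my_signature, QueueInput(my_dataflow),
                my_cost_fn, lambda: tf.train.AdamOptimizer(1e-3))
            trainer.train_with_defaults(steps_per_epoch=100, max_epoch=10)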
"""
get_cost_fn = TowerFunc(get_cost_fn, input_signature)
get_opt_fn = memoized(get_opt_fn)
self.tower_func = get_cost_fn
# TODO setup may want to register monitor as well??
input_callbacks = self._setup_input(input_signature, input)
train_callbacks = self._setup_graph(input, get_cost_fn, get_opt_fn)
self.register_callback(input_callbacks + train_callbacks)
@abstractmethod
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
"""
Implement the logic to build the graph, with an :class:`InputSource`
that's been setup already.
Returns:
[Callback]: list of callbacks needed
"""
def _setup_input(self, input_signature, input):
assert not input.setup_done()
return input.setup(input_signature)
def _make_get_grad_fn(self, input, get_cost_fn, get_opt_fn):
"""
Internal use only.
Returns:
a get_grad_fn for GraphBuilder to use.
"""
assert input.setup_done()
def get_grad_fn():
ctx = get_current_tower_context()
inputs = input.get_input_tensors()
def compute_grad_from_inputs(*inputs):
cost = get_cost_fn(*inputs)
assert isinstance(cost, tf.Tensor), \
"Expect the given function to return a cost, but got {} instead".format(str(cost))
assert cost.shape.ndims == 0, "Cost must be a scalar, but found {}!".format(cost)
if not ctx.is_training:
return None # this is the tower function, could be called for inference
if ctx.has_own_variables:
varlist = ctx.get_collection_in_tower(tfv1.GraphKeys.TRAINABLE_VARIABLES)
else:
varlist = tfv1.trainable_variables()
opt = get_opt_fn()
if is_tfv2() and isinstance(opt, tf.optimizers.Optimizer):
grads = opt.get_gradients(cost, varlist)
grads = list(zip(grads, varlist))
else:
grads = opt.compute_gradients(
cost, var_list=varlist,
gate_gradients=self.GATE_GRADIENTS,
colocate_gradients_with_ops=self.COLOCATE_GRADIENTS_WITH_OPS,
aggregation_method=self.AGGREGATION_METHOD)
grads = FilterNoneGrad().process(grads)
return grads
if not self.XLA_COMPILE:
return compute_grad_from_inputs(*inputs)
else:
try:
from tensorflow.contrib.compiler import xla # deprecated
except ImportError:
from tensorflow.python.compiler.xla import xla
def xla_func():
grads = compute_grad_from_inputs(*inputs)
# unpack, because the return value
# of xla function cannot have nested structure
grads = [x[0] for x in grads]
return grads
grads_no_vars = xla.compile(xla_func)
if ctx.has_own_variables:
varlist = ctx.get_collection_in_tower(tf.GraphKeys.TRAINABLE_VARIABLES)
else:
varlist = tf.trainable_variables()
return list(zip(grads_no_vars, varlist))
return get_grad_fn
| 11,428 | 37.352349 | 112 | py |
SyNet | SyNet-master/tensorpack/tensorpack/train/trainers.py | # -*- coding: utf-8 -*-
# File: trainers.py
import multiprocessing as mp
import os
import sys
import tensorflow as tf
from ..callbacks import CallbackFactory, RunOp
from ..graph_builder.distributed import DistributedParameterServerBuilder, DistributedReplicatedBuilder
from ..graph_builder.training import (
AsyncMultiGPUBuilder, SyncMultiGPUParameterServerBuilder, SyncMultiGPUReplicatedBuilder)
from ..graph_builder.utils import override_to_local_variable
from ..input_source import FeedfreeInput, QueueInput
from ..tfutils import get_global_step_var
from ..tfutils.distributed import get_distributed_session_creator
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.tower import TrainTowerContext
from ..utils import logger
from ..utils.argtools import map_arg
from ..utils.develop import HIDE_DOC, deprecated
from .tower import SingleCostTrainer
__all__ = ['NoOpTrainer', 'SimpleTrainer',
'QueueInputTrainer',
'SyncMultiGPUTrainer',
'SyncMultiGPUTrainerReplicated',
'SyncMultiGPUTrainerParameterServer',
'AsyncMultiGPUTrainer',
'DistributedTrainerParameterServer',
'DistributedTrainerReplicated',
'HorovodTrainer', 'BytePSTrainer']
def _int_to_range(x):
if isinstance(x, int):
assert x > 0, "Argument cannot be {}!".format(x)
return list(range(x))
return x
class SimpleTrainer(SingleCostTrainer):
"""
Single-GPU single-cost single-tower trainer.
"""
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
logger.info("Building graph for a single training tower ...")
with TrainTowerContext(''):
grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)()
opt = get_opt_fn()
self.train_op = opt.apply_gradients(grads, name='train_op')
return []
class NoOpTrainer(SimpleTrainer):
"""
A special trainer that builds the graph (if given a tower function)
and does nothing in each step.
It is used to only run the callbacks.
    Note that `steps_per_epoch` and `max_epoch` are still valid options.
"""
def run_step(self):
self.hooked_sess.run([])
# Only exists for type check & back-compatibility
class QueueInputTrainer(SimpleTrainer):
@deprecated("SimpleTrainer is sufficient!", "2019-12-31")
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
assert isinstance(input, QueueInput), input
return super(QueueInputTrainer, self)._setup_graph(input, get_cost_fn, get_opt_fn)
class SyncMultiGPUTrainerParameterServer(SingleCostTrainer):
__doc__ = SyncMultiGPUParameterServerBuilder.__doc__ + """
Attributes:
devices (list[int]): List of GPU ids.
"""
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, ps_device=None):
"""
Args:
gpus ([int]): list of GPU ids.
ps_device: either 'gpu' or 'cpu', where variables are stored.
The default value is subject to change.
"""
self.devices = gpus
if ps_device is None:
ps_device = 'gpu' if len(gpus) <= 2 else 'cpu'
self._builder = SyncMultiGPUParameterServerBuilder(gpus, ps_device)
super(SyncMultiGPUTrainerParameterServer, self).__init__()
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
if len(self.devices) > 1:
assert isinstance(input, FeedfreeInput), input
tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
grad_list = self._builder.call_for_each_tower(tower_fn)
self.train_op = self._builder.build(grad_list, get_opt_fn)
return []
def SyncMultiGPUTrainer(gpus):
"""
Return a default multi-GPU trainer, if you don't care about the details.
It may not be the most efficient one for your task.
Args:
gpus (list[int]): list of GPU ids.
"""
return SyncMultiGPUTrainerParameterServer(gpus, ps_device='cpu')
class AsyncMultiGPUTrainer(SingleCostTrainer):
__doc__ = AsyncMultiGPUBuilder.__doc__ + """
Attributes:
devices (list[int]): List of GPU ids.
"""
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, scale_gradient=True):
"""
Args:
gpus ([int]): list of GPU ids.
scale_gradient (bool): if True, will scale each gradient by ``1.0/nr_gpu``.
"""
self.devices = gpus
self._builder = AsyncMultiGPUBuilder(gpus, scale_gradient)
super(AsyncMultiGPUTrainer, self).__init__()
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
if len(self.devices) > 1:
assert isinstance(input, FeedfreeInput), input
tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
grad_list = self._builder.call_for_each_tower(tower_fn)
self.train_op = self._builder.build(grad_list, get_opt_fn)
return []
class SyncMultiGPUTrainerReplicated(SingleCostTrainer):
__doc__ = SyncMultiGPUReplicatedBuilder.__doc__ + """
Attributes:
devices (list[int]): List of GPU ids.
BROADCAST_EVERY_EPOCH (bool):
Whether to broadcast the variables every epoch.
Theoretically this is a no-op (because the variables
are supposed to be in-sync).
But this cheap operation may help prevent
certain numerical issues in practice.
Note that in cases such as BatchNorm, the variables may not be in sync.
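    Example (a minimal sketch of switching an existing job to 4 GPUs; ``config``
    is an existing :class:`TrainConfig`):
    .. code-block:: python
        trainer = SyncMultiGPUTrainerReplicated([0, 1, 2, 3], mode='nccl')
        launch_train_with_config(config, trainer)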
"""
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, average=True, mode=None):
"""
Args:
gpus (int or [int]): list of GPU ids.
average (bool): whether to average or sum gradients.
mode (str or None): Gradient aggregation mode.
Supported values: ['nccl', 'hierarchical', 'cpu'].
Default to pick automatically by heuristics.
These modes may have slight (within 5%) differences in speed.
"hierarchical" mode was designed for DGX-like 8GPU machines.
"""
self.devices = gpus
if mode is None:
mode = 'hierarchical' if len(gpus) == 8 else 'nccl'
mode = mode.lower()
self._builder = SyncMultiGPUReplicatedBuilder(gpus, average, mode)
self.BROADCAST_EVERY_EPOCH = True
super(SyncMultiGPUTrainerReplicated, self).__init__()
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
if len(self.devices) > 1:
assert isinstance(input, FeedfreeInput), input
tower_fn = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)
grad_list = self._builder.call_for_each_tower(tower_fn)
self.train_op, post_init_op = self._builder.build(grad_list, get_opt_fn)
if post_init_op is not None:
cb = RunOp(
post_init_op,
run_before=True,
run_as_trigger=self.BROADCAST_EVERY_EPOCH,
verbose=True)
cb.name_scope = "SyncVariables"
return [cb]
else:
return []
class DistributedTrainerBase(SingleCostTrainer):
devices = None
def __init__(self, gpus, server):
super(DistributedTrainerBase, self).__init__()
self.devices = gpus
self.server = server
self.job_name = server.server_def.job_name
logger.info("Distributed training on cluster:\n" + str(server.server_def.cluster))
def join(self):
logger.info("Calling server.join() on {}:{}".format(self.job_name, self.server.server_def.task_index))
logger.info("Kill me with 'kill {}'".format(os.getpid()))
self.server.join() # this function will never return tensorflow#4713
raise RuntimeError("This is a bug. Server.join() for should never return!")
@HIDE_DOC
def initialize(self, session_creator, session_init):
if not isinstance(session_creator, NewSessionCreator) or \
session_creator.user_provided_config:
raise ValueError(
"You are not allowed to set session_creator or session_config for distributed training! "
"To use a custom session config, pass it to tf.train.Server.")
super(DistributedTrainerBase, self).initialize(
get_distributed_session_creator(self.server), session_init)
# This is slow. deprecated in favor of horovod
class DistributedTrainerParameterServer(DistributedTrainerBase):
__doc__ = DistributedParameterServerBuilder.__doc__
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, server, caching_device='cpu'):
"""
Args:
gpus ([int]): list of GPU ids.
server (tf.train.Server): the server with ps and workers.
caching_device (str): either 'cpu' or 'gpu'. The device to cache variables copied from PS
"""
super(DistributedTrainerParameterServer, self).__init__(gpus, server)
assert self.job_name in ['ps', 'worker'], self.job_name
if self.job_name == 'ps':
self.join()
self._builder = DistributedParameterServerBuilder(gpus, server, caching_device)
self.is_chief = self._builder.is_chief
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
assert isinstance(input, FeedfreeInput), input
self.train_op = self._builder.build(
self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)
return []
# This is slow. deprecated in favor of horovod
class DistributedTrainerReplicated(DistributedTrainerBase):
__doc__ = DistributedReplicatedBuilder.__doc__
@map_arg(gpus=_int_to_range)
def __init__(self, gpus, server):
"""
Args:
gpus (list[int]): list of GPU ids.
server (tf.train.Server): the server with ps and workers.
"""
super(DistributedTrainerReplicated, self).__init__(gpus, server)
assert self.job_name in ['ps', 'worker'], self.job_name
if self.job_name == 'ps':
self.join()
self._builder = DistributedReplicatedBuilder(gpus, server)
self.is_chief = self._builder.is_chief
def _setup_input(self, input_signature, input):
with override_to_local_variable():
get_global_step_var() # gs should be local
# input source may create variables (queue size summary)
# TODO This is not good because we don't know from here
# whether something should be global or local. We now assume
# they should be local.
assert not input.setup_done()
return input.setup(input_signature)
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
assert isinstance(input, FeedfreeInput), input
self.train_op, initial_sync_op, model_sync_op = self._builder.build(
self._make_get_grad_fn(input, get_cost_fn, get_opt_fn), get_opt_fn)
callbacks = []
# Initial syncing vars from PS
cb = RunOp(lambda: initial_sync_op,
run_before=True, run_as_trigger=False, verbose=True)
cb.chief_only = False
callbacks.append(cb)
# Sync model_variables to PS, only chief needs to do this
if model_sync_op:
cb = RunOp(lambda: model_sync_op,
run_before=False, run_as_trigger=True, verbose=True)
logger.warn("For efficiency, local MODEL_VARIABLES are only synced to PS once "
"every epoch. Be careful if you save the model more frequently than this.")
callbacks.append(cb)
return callbacks
@property
def _main_tower_vs_name(self):
return "tower0"
class HorovodTrainer(SingleCostTrainer):
"""
Horovod trainer, support both multi-GPU and distributed training.
To use for multi-GPU training:
.. code-block:: bash
# First, change trainer to HorovodTrainer(), then
CUDA_VISIBLE_DEVICES=0,1,2,3 NCCL_DEBUG=INFO mpirun -np 4 --output-filename mylog python train.py
To use for distributed training:
.. code-block:: bash
# First, change trainer to HorovodTrainer(), then
mpirun -np 8 -H server1:4,server2:4 \\
-bind-to none -map-by slot \\
--output-filename mylog -x NCCL_DEBUG=INFO -x LD_LIBRARY_PATH \\
python train.py
# Add other environment variables you need by -x, e.g. PYTHONPATH, PATH.
# If using all GPUs, you can always skip the `CUDA_VISIBLE_DEVICES` option.
# There are other MPI options that can potentially improve performance especially on special hardwares.
Horovod can also be launched without MPI. See
`its documentation <https://github.com/horovod/horovod#running-horovod>`_
for more details.
Note:
1. To reach the maximum speed in your system, there are many options to tune
for Horovod installation and in the MPI command line.
See Horovod docs for details.
2. Due to a TF bug (#8136), you must not initialize CUDA context before the trainer starts training.
Therefore TF functions like `is_gpu_available()` or `list_local_devices()`
must be avoided.
You can, however, use `tf.config.experimental.list_physical_devices('GPU')`, introduced in TF 1.14.
3. Horovod supports both MPI and gloo. There are a few drawbacks of the MPI backend:
+ MPI does not like `fork()`. If your code (e.g. dataflow) contains multiprocessing, it may cause problems.
+ MPI sometimes fails to kill all processes in the end. Be sure to check it afterwards.
4. Keep in mind that there is one process running the script per GPU, therefore:
+ Make sure your InputSource has reasonable randomness.
+ If your data processing is heavy, doing it in a single dedicated process might be
a better choice than doing them repeatedly in each process.
+ You need to make sure log directories in each process won't conflict.
You can set it only for the chief process, or set a different one for each process.
+ Callbacks have an option to be run only in the chief process, or in all processes.
See :meth:`Callback.set_chief_only()`. Most callbacks have a reasonable
default already, but certain callbacks may need your customization.
Report an issue if you find any bad defaults.
+ You can use Horovod API such as `hvd.rank()` to know which process you are and choose
different code path. Chief process has rank 0.
5. Due to these caveats, see
`ResNet-Horovod <https://github.com/tensorpack/benchmarks/tree/master/ResNet-Horovod>`_
for a full example which has handled these common issues.
This example can train ImageNet in roughly an hour following the paper's setup.
Attributes:
BROADCAST_EVERY_EPOCH (bool):
Whether to broadcast the variables every epoch.
Theoretically this is a no-op (because the variables
are supposed to be in-sync).
But this cheap operation may help prevent certain numerical issues in practice.
Note that in cases such as BatchNorm, the variables may not be in sync.
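    Example (a minimal sketch of the Python-side change; ``config`` is an existing
    :class:`TrainConfig`, and the job still has to be launched with mpirun/horovodrun
    as shown above):
    .. code-block:: python
        trainer = HorovodTrainer(average=True)
        launch_train_with_config(config, trainer)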
"""
def __init__(self, average=True, compression=None):
"""
Args:
average (bool): whether to average or sum the gradients across processes.
compression: `hvd.Compression.fp16` or `hvd.Compression.none`
"""
if 'pyarrow' in sys.modules:
logger.warn("Horovod and pyarrow may conflict due to pyarrow bugs.")
# lazy import
import horovod.tensorflow as hvd
import horovod
hvd_version = tuple(map(int, horovod.__version__.split('.')[:3]))
self.hvd = hvd
hvd.init()
self.is_chief = hvd.rank() == 0
self._local_rank = hvd.local_rank()
self._rank = hvd.rank()
self._average = average
self._compression = compression
self._has_compression = hvd_version >= (0, 15, 0)
logger.info("[HorovodTrainer] local rank={}".format(self._local_rank))
super(HorovodTrainer, self).__init__()
self.BROADCAST_EVERY_EPOCH = True
def mpi_enabled(self):
"""
Returns:
bool: whether hvd is currently running under MPI
"""
try:
return self.hvd.mpi_enabled()
except AttributeError:
return False
def allreduce(self, grads):
if self.hvd.size() == 1:
return grads
# copied from https://github.com/uber/horovod/blob/master/horovod/tensorflow/__init__.py
averaged_gradients = []
with tf.name_scope("AllReduce"):
for grad, var in grads:
if grad is not None:
if self._compression is not None and self._has_compression:
avg_grad = self.hvd.allreduce(grad, average=self._average, compression=self._compression)
else:
avg_grad = self.hvd.allreduce(grad, average=self._average)
averaged_gradients.append((avg_grad, var))
else:
averaged_gradients.append((None, var))
return averaged_gradients
def _setup_graph(self, input, get_cost_fn, get_opt_fn):
with TrainTowerContext(''):
grads = self._make_get_grad_fn(input, get_cost_fn, get_opt_fn)()
grads = self.allreduce(grads)
opt = get_opt_fn()
self.train_op = opt.apply_gradients(grads, name='train_op')
cb = CallbackFactory(
before_train=self.broadcast,
trigger=self.broadcast if self.BROADCAST_EVERY_EPOCH else None
).set_chief_only(False)
return [cb]
def broadcast(self, _):
logger.info("Running broadcast ...")
# the op will be created in initialize()
self.sess.run(self._broadcast_op)
@HIDE_DOC
def initialize(self, session_creator, session_init):
# broadcast_op should be the last setup_graph: it needs to be created
# "right before" the graph is finalized,
# because it needs to capture all the variables (which may be created by callbacks).
self._broadcast_op = self.hvd.broadcast_global_variables(0)
# it's important that our NewSessionCreator does not finalize the graph
if not isinstance(session_creator, NewSessionCreator):
raise ValueError(
"session_creator has to be `NewSessionCreator` for horovod/byteps training! ")
# NOTE It will fail if GPU was already detected before initializing the session
# https://github.com/tensorflow/tensorflow/issues/8136
session_creator.config.gpu_options.visible_device_list = str(self._local_rank)
try:
session_creator.config.inter_op_parallelism_threads = mp.cpu_count() // self.hvd.local_size()
except AttributeError: # old horovod does not have local_size
pass
super(HorovodTrainer, self).initialize(session_creator, session_init)
        # This broadcast belongs to the "initialize" stage
# It should not be delayed to the "before_train" stage.
# TODO:
# 1. a allgather helper to concat strings
# 2. check variables on each rank match each other, print warnings, and broadcast the common set.
if self.is_chief:
logger.info("Broadcasting initialized variables ...")
else:
logger.info("Rank {} waiting for initialization broadcasting ...".format(self._rank))
self.sess.run(self._broadcast_op)
class BytePSTrainer(HorovodTrainer):
"""
BytePS trainer. Supports both multi-GPU and distributed training.
It achieves better scalability than horovod in distributed training, if the model is communication
intensive and you have properly set up the machines following its
`best practices <https://github.com/bytedance/byteps/blob/master/docs/best-practice.md>`_
which requires a few extra bandwidth servers than horovod.
To use it, switch the trainer, and refer to BytePS documentation on how to
launch server/scheduler/workers.
Attributes:
hvd (module): the byteps module that contains horovod-compatible APIs
like `rank(),size()`.
This attribute exists so that downstream code that uses these APIs
does not need to worry about which library is being used under the hood.
"""
def __init__(self, average=True):
"""
Args:
average (bool): whether to average or sum the gradients across processes.
"""
import byteps.tensorflow as bps
self.hvd = bps # BytePS has the same interface as Horovod
self.hvd.allreduce = bps.push_pull # https://github.com/bytedance/byteps/issues/8
assert os.environ.get("DMLC_ROLE", None) == "worker"
assert "DMLC_WORKER_ID" in os.environ and "DMLC_NUM_WORKER" in os.environ
bps.init()
self.is_chief = bps.rank() == 0
self._local_rank = bps.local_rank()
self._rank = bps.rank()
self._average = average
self._compression = None
self._has_compression = False
logger.info("[BytePSTrainer] local rank={}".format(self._local_rank))
SingleCostTrainer.__init__(self)
def mpi_enabled(self):
"""
Returns:
bool: whether hvd is currently running under MPI
"""
return False
| 21,673 | 39.062847 | 119 | py |
SyNet | SyNet-master/tensorpack/tensorpack/train/config.py | # -*- coding: utf-8 -*-
# File: config.py
import os
import tensorflow as tf
from ..callbacks import (
JSONWriter, MergeAllSummaries, MovingAverageSummary, ProgressBar, RunUpdateOps, ScalarPrinter, TFEventWriter)
from ..dataflow.base import DataFlow
from ..input_source import InputSource
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.sessinit import SaverRestore, SessionInit
from ..utils import logger
from .model_desc import ModelDescBase
__all__ = ['TrainConfig', 'AutoResumeTrainConfig', 'DEFAULT_CALLBACKS', 'DEFAULT_MONITORS']
def DEFAULT_CALLBACKS():
"""
Return the default callbacks,
which will be used in :class:`TrainConfig` and :meth:`Trainer.train_with_defaults`.
They are:
1. MovingAverageSummary()
2. ProgressBar()
3. MergeAllSummaries()
4. RunUpdateOps()
"""
return [
MovingAverageSummary(),
ProgressBar(),
MergeAllSummaries(),
RunUpdateOps()]
def DEFAULT_MONITORS():
"""
Return the default monitors,
which will be used in :class:`TrainConfig` and :meth:`Trainer.train_with_defaults`.
They are:
1. TFEventWriter()
2. JSONWriter()
3. ScalarPrinter()
"""
return [TFEventWriter(), JSONWriter(), ScalarPrinter()]
class TrainConfig(object):
"""
A collection of options to be used for single-cost trainers.
Note that you do not have to use :class:`TrainConfig`.
You can use the API of :class:`Trainer` directly, to have more fine-grained control of the training.
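    Example (a minimal sketch; ``MyModel`` and ``my_dataflow`` are placeholders for
    your own model and input pipeline):
    .. code-block:: python
        config = TrainConfig(
            model=MyModel(),
            dataflow=my_dataflow,
            callbacks=[ModelSaver()],
            steps_per_epoch=1000,
            max_epoch=100)
        launch_train_with_config(config, SimpleTrainer())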
"""
def __init__(self,
dataflow=None, data=None,
model=None,
callbacks=None, extra_callbacks=None, monitors=None,
session_creator=None, session_config=None, session_init=None,
starting_epoch=1, steps_per_epoch=None, max_epoch=99999,
**kwargs):
"""
Args:
dataflow (DataFlow):
data (InputSource):
model (ModelDesc):
callbacks (list[Callback]): a list of :class:`Callback` to use during training.
extra_callbacks (list[Callback]): This argument
is only used to provide the defaults in addition to ``callbacks``.
The list of callbacks that will be used in the end is simply ``callbacks + extra_callbacks``.
It is usually left as None, and the default value for this argument is :func:`DEFAULT_CALLBACKS()`.
You can override it when you don't like any of the default callbacks.
For example, if you'd like to let the progress bar print tensors, you can use
.. code-block:: none
extra_callbacks=[ProgressBar(names=['name']),
MovingAverageSummary(),
MergeAllSummaries(),
RunUpdateOps()]
monitors (list[MonitorBase]): Defaults to :func:`DEFAULT_MONITORS()`.
session_creator (tf.train.SessionCreator): Defaults to :class:`sesscreate.NewSessionCreator()`
with the config returned by :func:`tfutils.get_default_sess_config()`.
session_config (tf.ConfigProto): when session_creator is None, use this to create the session.
session_init (SessionInit): how to initialize variables of a session. Defaults to do nothing.
starting_epoch (int): The index of the first epoch.
steps_per_epoch (int): the number of steps (defined by :meth:`Trainer.run_step`) to run in each epoch.
Defaults to the input data size. You may want to divide it by the #GPUs in multi-GPU training.
Number of steps per epoch only affects the schedule of callbacks.
It does not affect the sequence of input data seen by the model.
max_epoch (int): maximum number of epoch to run training.
"""
# TODO type checker decorator
def assert_type(v, tp, name):
assert isinstance(v, tp), \
"{} has to be type '{}', but an object of type '{}' found.".format(
name, tp.__name__, v.__class__.__name__)
# process data & model
assert data is None or dataflow is None, "dataflow and data cannot be both presented in TrainConfig!"
if dataflow is not None:
assert_type(dataflow, DataFlow, 'dataflow')
if data is not None:
assert_type(data, InputSource, 'data')
self.dataflow = dataflow
self.data = data
if model is not None:
assert_type(model, ModelDescBase, 'model')
self.model = model
if callbacks is not None:
assert_type(callbacks, list, 'callbacks')
self.callbacks = callbacks
if extra_callbacks is not None:
assert_type(extra_callbacks, list, 'extra_callbacks')
self.extra_callbacks = extra_callbacks
if monitors is not None:
assert_type(monitors, list, 'monitors')
self.monitors = monitors
if session_init is not None:
assert_type(session_init, SessionInit, 'session_init')
self.session_init = session_init
if session_creator is None:
if session_config is not None:
self.session_creator = NewSessionCreator(config=session_config)
else:
self.session_creator = NewSessionCreator(config=None)
else:
self.session_creator = session_creator
assert session_config is None, "Cannot set both session_creator and session_config!"
if steps_per_epoch is None:
try:
if dataflow is not None:
steps_per_epoch = len(dataflow)
elif data is not None:
steps_per_epoch = data.size()
else:
raise NotImplementedError()
except NotImplementedError:
logger.error("You must set `TrainConfig(steps_per_epoch)` if the size of your input is not available.")
raise
else:
steps_per_epoch = int(steps_per_epoch)
self.steps_per_epoch = steps_per_epoch
self.starting_epoch = int(starting_epoch)
self.max_epoch = int(max_epoch)
class AutoResumeTrainConfig(TrainConfig):
"""
Same as :class:`TrainConfig`, but does the following to automatically
resume from training:
1. If a checkpoint was found in :meth:`logger.get_logger_dir()`, set
`session_init` option to load it.
2. If a JSON history was found in :meth:`logger.get_logger_dir()`, try to
load the epoch number from it and set the `starting_epoch` option to
continue training.
    You can choose to let the above two options either overwrite or
    not overwrite user-provided arguments, as explained below.
Note that the functionality requires the logging directory to obtain
necessary information from a previous run.
    If you have an unconventional setup of the logging directory, this class will not
    work for you, for example:
1. If you save the checkpoint to a different directory rather than the
logging directory.
2. If in distributed training the directory is not
available to every worker, or the directories are different for different workers.
"""
def __init__(self, always_resume=True, **kwargs):
"""
Args:
always_resume (bool): If False, user-provided arguments
`session_init` and `starting_epoch` will take priority.
Otherwise, resume will take priority.
kwargs: same as in :class:`TrainConfig`.
Note:
The main goal of this class is to let a training job resume
without changing any line of code or command line arguments.
So it's useful to let resume take priority over user-provided arguments sometimes.
For example: if your training starts from a pre-trained model,
you would want it to use user-provided model loader at the
beginning, but a "resume" model loader when the job was
interrupted and restarted.
"""
found_sessinit = False
if always_resume or 'session_init' not in kwargs:
sessinit = self.get_sessinit_resume()
if sessinit is not None:
found_sessinit = True
path = sessinit.path
if 'session_init' in kwargs:
logger.info("Found checkpoint at {}. "
"session_init arguments will be overwritten.".format(path))
else:
logger.info("Will load checkpoint at {}.".format(path))
kwargs['session_init'] = sessinit
found_last_epoch = False
if always_resume or 'starting_epoch' not in kwargs:
last_epoch = JSONWriter.load_existing_epoch_number()
if last_epoch is not None:
found_last_epoch = True
now_epoch = last_epoch + 1
logger.info("Found history statistics from JSON. "
"Setting starting_epoch to {}.".format(now_epoch))
kwargs['starting_epoch'] = now_epoch
assert found_sessinit == found_last_epoch, \
"Found SessionInit={}, Found Last Epoch={}".format(found_sessinit, found_last_epoch)
super(AutoResumeTrainConfig, self).__init__(**kwargs)
@staticmethod
def get_sessinit_resume(dir=None):
if dir is None:
dir = logger.get_logger_dir()
if not dir:
return None
path = os.path.join(dir, 'checkpoint')
if not tf.gfile.Exists(path):
return None
return SaverRestore(path)
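if __name__ == '__main__':
    # Illustrative sketch added for documentation purposes (not part of the
    # original file): probe a directory for a checkpoint the way auto-resume
    # does; it returns None when no checkpoint file is found there.
    print(AutoResumeTrainConfig.get_sessinit_resume('/tmp/nonexistent_train_log'))  # None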
| 9,874 | 39.471311 | 119 | py |
SyNet | SyNet-master/tensorpack/tensorpack/train/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# flake8: noqa
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .base import *
from .config import *
from .interface import *
from .tower import *
from .trainers import *
from pkgutil import iter_modules
import os
import os.path
__all__ = []
def global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else []
if lst:
del globals()[name]
for k in lst:
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
_SKIP = ['utility']
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if module_name.startswith('_'):
continue
if module_name not in _SKIP:
global_import(module_name)
| 1,079 | 23.545455 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/train/interface.py | # -*- coding: utf-8 -*-
# File: interface.py
from ..compat import tfv1
from ..input_source import FeedInput, InputSource, QueueInput, StagingInput
from ..utils import logger
from ..compat import is_tfv2
from .config import TrainConfig
from .tower import SingleCostTrainer
from .trainers import SimpleTrainer
__all__ = ['launch_train_with_config']
def apply_default_prefetch(input_source_or_dataflow, trainer):
"""
Apply a set of default rules to make a fast :class:`InputSource`.
Args:
input_source_or_dataflow(InputSource | DataFlow):
trainer (Trainer):
Returns:
InputSource
"""
if not isinstance(input_source_or_dataflow, InputSource):
# to mimic same behavior of the old trainer interface
if type(trainer) == SimpleTrainer:
input = FeedInput(input_source_or_dataflow)
else:
logger.info("Automatically applying QueueInput on the DataFlow.")
input = QueueInput(input_source_or_dataflow)
else:
input = input_source_or_dataflow
if hasattr(trainer, 'devices'):
towers = trainer.devices
if len(towers) > 1: # seem to only help on >1 GPUs
assert not isinstance(trainer, SimpleTrainer)
if isinstance(input, QueueInput):
logger.info("Automatically applying StagingInput on the DataFlow.")
input = StagingInput(input)
return input
def launch_train_with_config(config, trainer):
"""
Train with a :class:`TrainConfig` and a :class:`Trainer`, to
present the simple and old training interface. It basically does the following
3 things (and you can easily do them by yourself if you need more control):
1. Setup the input with automatic prefetching heuristics,
from `config.data` or `config.dataflow`.
2. Call `trainer.setup_graph` with the input as well as `config.model`.
3. Call `trainer.train` with rest of the attributes of config.
See the `related tutorial
<https://tensorpack.readthedocs.io/tutorial/training-interface.html#with-modeldesc-and-trainconfig>`_
to learn more.
Args:
config (TrainConfig):
trainer (Trainer): an instance of :class:`SingleCostTrainer`.
Example:
.. code-block:: python
launch_train_with_config(
config, SyncMultiGPUTrainerParameterServer(8, ps_device='gpu'))
"""
if is_tfv2():
tfv1.disable_eager_execution()
assert isinstance(trainer, SingleCostTrainer), trainer
assert isinstance(config, TrainConfig), config
assert config.model is not None
assert config.dataflow is not None or config.data is not None
model = config.model
input = config.data or config.dataflow
input = apply_default_prefetch(input, trainer)
# This is the only place where the `ModelDesc` abstraction is useful.
    # We should gradually move away from this not-so-useful abstraction.
# TowerFunc is a better abstraction (similar to tf.function in the future)
trainer.setup_graph(
model.get_input_signature(), input,
model.build_graph, model.get_optimizer)
_check_unused_regularization()
trainer.train_with_defaults(
callbacks=config.callbacks,
monitors=config.monitors,
session_creator=config.session_creator,
session_init=config.session_init,
steps_per_epoch=config.steps_per_epoch,
starting_epoch=config.starting_epoch,
max_epoch=config.max_epoch,
extra_callbacks=config.extra_callbacks)
def _check_unused_regularization():
coll = tfv1.get_collection(tfv1.GraphKeys.REGULARIZATION_LOSSES)
unconsumed_reg = []
for c in coll:
if len(c.consumers()) == 0:
unconsumed_reg.append(c)
if unconsumed_reg:
logger.warn("The following tensors appear in REGULARIZATION_LOSSES collection but have no "
"consumers! You may have forgotten to add regularization to total cost.")
logger.warn("Unconsumed regularization: {}".format(', '.join([x.name for x in unconsumed_reg])))
| 4,079 | 35.428571 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/train/model_desc.py | # -*- coding: utf-8 -*-
# File: model_desc.py
import tensorflow as tf
from ..utils.argtools import memoized_method
from ..tfutils.common import get_op_tensor_name
from ..tfutils.tower import get_current_tower_context
from ..compat import backport_tensor_spec, tfv1
TensorSpec = backport_tensor_spec()
__all__ = ['ModelDesc', 'ModelDescBase']
class ModelDescBase(object):
"""
Base class for a model description.
It is used for the simple training interface described in
`Training Interface Tutorial <https://tensorpack.readthedocs.io/tutorial/training-interface.html>`_.
Subclass is expected to implement :meth:`inputs` and :meth:`build_graph`, as they
together define a tower function.
"""
@memoized_method
def get_input_signature(self):
"""
Returns:
A list of :class:`tf.TensorSpec`, which describes the inputs of this model.
The result is cached for each instance of :class:`ModelDescBase`.
"""
with tf.Graph().as_default() as G: # create these placeholder in a temporary graph
inputs = self.inputs()
assert isinstance(inputs, (list, tuple)), \
"ModelDesc.inputs() should return a list of tf.TensorSpec objects! Got {} instead.".format(str(inputs))
if isinstance(inputs[0], tf.Tensor):
for p in inputs:
assert "Placeholder" in p.op.type, \
"inputs() have to return TensorSpec or placeholders! Found {} instead.".format(p)
assert p.graph == G, "Placeholders returned by inputs() should be created inside inputs()!"
return [TensorSpec(shape=p.shape, dtype=p.dtype, name=get_op_tensor_name(p.name)[0]) for p in inputs]
@property
def input_names(self):
"""
list[str]: the names of all the inputs.
"""
return [k.name for k in self.get_input_signature()]
def inputs(self):
"""
A subclass is expected to implement this method.
If returning placeholders,
the placeholders **have to** be created inside this method.
Don't return placeholders created in other places.
        Also, you should never call this method yourself.
Returns:
list[tf.TensorSpec or tf.placeholder].
"""
raise NotImplementedError()
def build_graph(self, *args):
"""
A subclass is expected to implement this method.
Build the whole symbolic graph.
This is supposed to be part of the "tower function" when used with :class:`TowerTrainer`.
Args:
args ([tf.Tensor]): tensors that matches the list of inputs defined by ``inputs()``.
Returns:
In general it returns nothing, but a subclass
may require it to return necessary information to build the trainer.
For example, `SingleCostTrainer` expect this method to return the cost tensor.
"""
raise NotImplementedError()
@property
def training(self):
"""
bool: whether the caller is under a training context or not.
"""
return get_current_tower_context().is_training
class ModelDesc(ModelDescBase):
"""
    One subclass of :class:`ModelDescBase` with the assumption of
**single cost** and **single optimizer** training.
It has the following constraints in addition to :class:`ModelDescBase`:
1. `build_graph(...)` method should return a cost tensor when called under a training context.
The cost will be the final cost to be optimized by the optimizer.
Therefore it should include necessary regularization.
2. Subclass is expected to implement :meth:`optimizer()` method.
"""
@memoized_method
def get_optimizer(self):
"""
Return the memoized optimizer returned by `optimizer()`.
        Users of :class:`ModelDesc` will need to implement `optimizer()`,
        which will only be called once for each model instance.
Returns:
a :class:`tf.train.Optimizer` instance.
"""
ret = self.optimizer()
assert isinstance(ret, tfv1.train.Optimizer), \
"ModelDesc.optimizer() must return a tf.train.Optimizer! Got {} instead.".format(str(ret))
return ret
def optimizer(self):
"""
A subclass is expected to implement this method.
Returns:
a `tf.train.Optimizer` instance.
"""
raise NotImplementedError()
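if __name__ == '__main__':
    # Illustrative sketch added for documentation purposes (not part of the
    # original file): a toy ModelDesc subclass showing the expected contract of
    # `inputs()`, `build_graph()` and `optimizer()`. Only the input signature is
    # exercised below; the names and shapes are arbitrary.
    class _ToyModel(ModelDesc):
        def inputs(self):
            return [TensorSpec((None, 4), tf.float32, 'x'),
                    TensorSpec((None,), tf.int32, 'label')]
        def build_graph(self, x, label):
            # a dummy scalar cost, just to satisfy the single-cost contract
            return tf.identity(tf.reduce_mean(x), name='total_cost')
        def optimizer(self):
            return tfv1.train.GradientDescentOptimizer(1e-3)
    print(_ToyModel().input_names)  # ['x', 'label']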
| 4,521 | 33.519084 | 119 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/concurrency.py | # -*- coding: utf-8 -*-
# File: concurrency.py
# Some code taken from zxytim
import sys
import atexit
import bisect
import multiprocessing as mp
import platform
import signal
import threading
import weakref
from contextlib import contextmanager
import six
from six.moves import queue
import subprocess
from . import logger
from .argtools import log_once
__all__ = ['StoppableThread', 'LoopThread', 'ShareSessionThread',
'ensure_proc_terminate',
'start_proc_mask_signal']
class StoppableThread(threading.Thread):
"""
A thread that has a 'stop' event.
"""
def __init__(self, evt=None):
"""
Args:
evt(threading.Event): if None, will create one.
"""
super(StoppableThread, self).__init__()
if evt is None:
evt = threading.Event()
self._stop_evt = evt
def stop(self):
""" Stop the thread"""
self._stop_evt.set()
def stopped(self):
"""
Returns:
bool: whether the thread is stopped or not
"""
        return self._stop_evt.is_set()
def queue_put_stoppable(self, q, obj):
""" Put obj to queue, but will give up when the thread is stopped"""
while not self.stopped():
try:
q.put(obj, timeout=5)
break
except queue.Full:
pass
def queue_get_stoppable(self, q):
""" Take obj from queue, but will give up when the thread is stopped"""
while not self.stopped():
try:
return q.get(timeout=5)
except queue.Empty:
pass
class LoopThread(StoppableThread):
""" A pausable thread that simply runs a loop"""
def __init__(self, func, pausable=True):
"""
Args:
func: the function to run
"""
super(LoopThread, self).__init__()
self._func = func
self._pausable = pausable
if pausable:
self._lock = threading.Lock()
self.daemon = True
def run(self):
while not self.stopped():
if self._pausable:
self._lock.acquire()
self._lock.release()
self._func()
def pause(self):
""" Pause the loop """
assert self._pausable
self._lock.acquire()
def resume(self):
""" Resume the loop """
assert self._pausable
self._lock.release()
class ShareSessionThread(threading.Thread):
""" A wrapper around thread so that the thread
uses the default session at "start()" time.
"""
def __init__(self, th=None):
"""
Args:
th (threading.Thread or None):
"""
super(ShareSessionThread, self).__init__()
if th is not None:
assert isinstance(th, threading.Thread), th
self._th = th
self.name = th.name
self.daemon = th.daemon
@contextmanager
def default_sess(self):
if self._sess:
with self._sess.as_default():
yield self._sess
else:
logger.warn("ShareSessionThread {} wasn't under a default session!".format(self.name))
yield None
def start(self):
from ..compat import tfv1
self._sess = tfv1.get_default_session()
super(ShareSessionThread, self).start()
def run(self):
if not self._th:
raise NotImplementedError()
with self._sess.as_default():
self._th.run()
class DIE(object):
""" A placeholder class indicating end of queue """
pass
def ensure_proc_terminate(proc):
"""
    Make sure processes terminate when the main process exits.
Args:
proc (multiprocessing.Process or list)
"""
if isinstance(proc, list):
for p in proc:
ensure_proc_terminate(p)
return
def stop_proc_by_weak_ref(ref):
proc = ref()
if proc is None:
return
if not proc.is_alive():
return
proc.terminate()
proc.join()
assert isinstance(proc, mp.Process)
atexit.register(stop_proc_by_weak_ref, weakref.ref(proc))
def enable_death_signal(_warn=True):
"""
Set the "death signal" of the current process, so that
the current process will be cleaned with guarantee
in case the parent dies accidentally.
"""
if platform.system() != 'Linux':
return
try:
import prctl # pip install python-prctl
except ImportError:
if _warn:
log_once('"import prctl" failed! Install python-prctl so that processes can be cleaned with guarantee.',
'warn')
return
else:
assert hasattr(prctl, 'set_pdeathsig'), \
"prctl.set_pdeathsig does not exist! Note that you need to install 'python-prctl' instead of 'prctl'."
# is SIGHUP a good choice?
prctl.set_pdeathsig(signal.SIGHUP)
def is_main_thread():
if six.PY2:
return isinstance(threading.current_thread(), threading._MainThread)
else:
# a nicer solution with py3
return threading.current_thread() == threading.main_thread()
@contextmanager
def mask_sigint():
"""
Returns:
        If called in the main thread, returns a context where ``SIGINT`` is ignored, and yields True.
        Otherwise yields False.
"""
if is_main_thread():
sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)
yield True
signal.signal(signal.SIGINT, sigint_handler)
else:
yield False
def start_proc_mask_signal(proc):
"""
Start process(es) with SIGINT ignored.
Args:
proc: (mp.Process or list)
Note:
The signal mask is only applied when called from main thread.
"""
if not isinstance(proc, list):
proc = [proc]
with mask_sigint():
for p in proc:
if isinstance(p, mp.Process):
if sys.version_info < (3, 4) or mp.get_start_method() == 'fork':
log_once("""
Starting a process with 'fork' method is efficient but not safe and may cause deadlock or crash.
Use 'forkserver' or 'spawn' method instead if you run into such issues.
See https://docs.python.org/3/library/multiprocessing.html#contexts-and-start-methods on how to set them.
""".replace("\n", ""),
'warn') # noqa
p.start()
def subproc_call(cmd, timeout=None):
"""
Execute a command with timeout, and return STDOUT and STDERR
Args:
cmd(str): the command to execute.
timeout(float): timeout in seconds.
Returns:
output(bytes), retcode(int). If timeout, retcode is -1.
"""
try:
output = subprocess.check_output(
cmd, stderr=subprocess.STDOUT,
shell=True, timeout=timeout)
return output, 0
except subprocess.TimeoutExpired as e:
logger.warn("Command '{}' timeout!".format(cmd))
if e.output:
logger.warn(e.output.decode('utf-8'))
return e.output, -1
else:
return "", -1
except subprocess.CalledProcessError as e:
logger.warn("Command '{}' failed, return code={}".format(cmd, e.returncode))
logger.warn(e.output.decode('utf-8'))
return e.output, e.returncode
except Exception:
logger.warn("Command '{}' failed to run.".format(cmd))
return "", -2
class OrderedContainer(object):
"""
    Like a queue, but will always wait to receive the item with rank
    (x+1) and produce it before producing the item with rank (x+2).
Warning:
It is not thread-safe.
"""
def __init__(self, start=0):
"""
Args:
start(int): the starting rank.
"""
self.ranks = []
self.data = []
self.wait_for = start
def put(self, rank, val):
"""
Args:
            rank(int): rank of the element. All elements must have different ranks.
val: an object
"""
idx = bisect.bisect(self.ranks, rank)
self.ranks.insert(idx, rank)
self.data.insert(idx, val)
def has_next(self):
if len(self.ranks) == 0:
return False
return self.ranks[0] == self.wait_for
def get(self):
assert self.has_next()
ret = self.data[0]
rank = self.ranks[0]
del self.ranks[0]
del self.data[0]
self.wait_for += 1
return rank, ret
class OrderedResultGatherProc(mp.Process):
"""
Gather indexed data from a data queue, and produce results with the
original index-based order.
"""
def __init__(self, data_queue, nr_producer, start=0):
"""
Args:
data_queue(mp.Queue): a queue which contains datapoints.
nr_producer(int): number of producer processes. This process will
                terminate after receiving this many :class:`DIE` sentinels.
start(int): the rank of the first object
"""
super(OrderedResultGatherProc, self).__init__()
self.data_queue = data_queue
self.ordered_container = OrderedContainer(start=start)
self.result_queue = mp.Queue()
self.nr_producer = nr_producer
def run(self):
nr_end = 0
try:
while True:
task_id, data = self.data_queue.get()
if task_id == DIE:
self.result_queue.put((task_id, data))
nr_end += 1
if nr_end == self.nr_producer:
return
else:
self.ordered_container.put(task_id, data)
while self.ordered_container.has_next():
self.result_queue.put(self.ordered_container.get())
except Exception as e:
import traceback
traceback.print_exc()
raise e
def get(self):
return self.result_queue.get()
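if __name__ == '__main__':
    # Illustrative sketch added for documentation purposes (not part of the
    # original file): OrderedContainer releases items strictly in rank order,
    # regardless of the order in which they were put in.
    c = OrderedContainer(start=0)
    for rank in [2, 0, 1]:
        c.put(rank, 'item-{}'.format(rank))
    while c.has_next():
        print(c.get())  # (0, 'item-0'), then (1, 'item-1'), then (2, 'item-2')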
| 9,998 | 26.852368 | 116 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/timer.py | # -*- coding: utf-8 -*-
# File: timer.py
import atexit
from collections import defaultdict
from contextlib import contextmanager
from time import perf_counter as timer # noqa
from . import logger
from .stats import StatCounter
__all__ = ['timed_operation', 'IterSpeedCounter', 'Timer']
@contextmanager
def timed_operation(msg, log_start=False):
"""
Surround a context with a timer.
Args:
msg(str): the log to print.
log_start(bool): whether to print also at the beginning.
Example:
.. code-block:: python
with timed_operation('Good Stuff'):
time.sleep(1)
Will print:
.. code-block:: python
Good stuff finished, time:1sec.
"""
assert len(msg)
if log_start:
logger.info('Start {} ...'.format(msg))
start = timer()
yield
msg = msg[0].upper() + msg[1:]
logger.info('{} finished, time:{:.4f} sec.'.format(
msg, timer() - start))
_TOTAL_TIMER_DATA = defaultdict(StatCounter)
@contextmanager
def total_timer(msg):
""" A context which add the time spent inside to the global TotalTimer. """
start = timer()
yield
t = timer() - start
_TOTAL_TIMER_DATA[msg].feed(t)
def print_total_timer():
"""
Print the content of the global TotalTimer, if it's not empty. This function will automatically get
called when program exits.
"""
if len(_TOTAL_TIMER_DATA) == 0:
return
for k, v in _TOTAL_TIMER_DATA.items():
logger.info("Total Time: {} -> {:.2f} sec, {} times, {:.3g} sec/time".format(
k, v.sum, v.count, v.average))
atexit.register(print_total_timer)
class IterSpeedCounter(object):
""" Test how often some code gets reached.
Example:
Print the speed of the iteration every 100 times.
.. code-block:: python
speed = IterSpeedCounter(100)
for k in range(1000):
# do something
speed()
"""
def __init__(self, print_every, name=None):
"""
Args:
print_every(int): interval to print.
            name(str): name to use when printing.
"""
self.cnt = 0
self.print_every = int(print_every)
self.name = name if name else 'IterSpeed'
def reset(self):
self.start = timer()
def __call__(self):
if self.cnt == 0:
self.reset()
self.cnt += 1
if self.cnt % self.print_every != 0:
return
t = timer() - self.start
logger.info("{}: {:.2f} sec, {} times, {:.3g} sec/time".format(
self.name, t, self.cnt, t / self.cnt))
class Timer():
"""
A timer class which computes the time elapsed since the start/reset of the timer.
"""
def __init__(self):
self.reset()
def reset(self):
"""
Reset the timer.
"""
self._start = timer()
self._paused = False
self._total_paused = 0
def pause(self):
"""
Pause the timer.
"""
assert self._paused is False
self._paused = timer()
def is_paused(self):
return self._paused is not False
def resume(self):
"""
Resume the timer.
"""
assert self._paused is not False
self._total_paused += timer() - self._paused
self._paused = False
def seconds(self):
"""
Returns:
float: the total number of seconds since the start/reset of the timer, excluding the
time in between when the timer is paused.
"""
if self._paused:
self.resume()
self.pause()
return timer() - self._start - self._total_paused
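if __name__ == '__main__':
    # Illustrative sketch added for documentation purposes (not part of the
    # original file): measure elapsed time while excluding a paused interval.
    import time
    t = Timer()
    time.sleep(0.1)
    t.pause()
    time.sleep(0.1)  # this sleep is not counted
    t.resume()
    print('elapsed: {:.2f} sec'.format(t.seconds()))  # roughly 0.10 sec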
| 3,746 | 23.019231 | 103 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/argtools.py | # -*- coding: utf-8 -*-
# File: argtools.py
import inspect
import functools
from . import logger
__all__ = ['map_arg', 'memoized', 'memoized_method', 'graph_memoized', 'shape2d', 'shape4d',
'memoized_ignoreargs', 'log_once']
def map_arg(**maps):
"""
Apply a mapping on certain argument before calling the original function.
Args:
maps (dict): {argument_name: map_func}
"""
def deco(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
# getcallargs was deprecated since 3.5
sig = inspect.signature(func)
argmap = sig.bind_partial(*args, **kwargs).arguments
for k, map_func in maps.items():
if k in argmap:
argmap[k] = map_func(argmap[k])
return func(**argmap)
return wrapper
return deco
memoized = functools.lru_cache(maxsize=None)
""" Alias to :func:`functools.lru_cache`
WARNING: memoization will keep keys and values alive!
"""
def graph_memoized(func):
"""
Like memoized, but keep one cache per default graph.
"""
# TODO it keeps the graph alive
from ..compat import tfv1
GRAPH_ARG_NAME = '__IMPOSSIBLE_NAME_FOR_YOU__'
@memoized
def func_with_graph_arg(*args, **kwargs):
kwargs.pop(GRAPH_ARG_NAME)
return func(*args, **kwargs)
@functools.wraps(func)
def wrapper(*args, **kwargs):
assert GRAPH_ARG_NAME not in kwargs, "No Way!!"
graph = tfv1.get_default_graph()
kwargs[GRAPH_ARG_NAME] = graph
return func_with_graph_arg(*args, **kwargs)
return wrapper
_MEMOIZED_NOARGS = {}
def memoized_ignoreargs(func):
"""
A decorator. It performs memoization ignoring the arguments used to call
the function.
"""
def wrapper(*args, **kwargs):
if func not in _MEMOIZED_NOARGS:
res = func(*args, **kwargs)
_MEMOIZED_NOARGS[func] = res
return res
return _MEMOIZED_NOARGS[func]
return wrapper
def shape2d(a):
"""
Ensure a 2D shape.
Args:
        a: an int or a tuple/list of length 2
    Returns:
        list: of length 2. if ``a`` is an int, return ``[a, a]``.
"""
if type(a) == int:
return [a, a]
if isinstance(a, (list, tuple)):
assert len(a) == 2
return list(a)
raise RuntimeError("Illegal shape: {}".format(a))
def get_data_format(data_format, keras_mode=True):
if keras_mode:
dic = {'NCHW': 'channels_first', 'NHWC': 'channels_last'}
else:
dic = {'channels_first': 'NCHW', 'channels_last': 'NHWC'}
ret = dic.get(data_format, data_format)
if ret not in dic.values():
raise ValueError("Unknown data_format: {}".format(data_format))
return ret
def shape4d(a, data_format='NHWC'):
"""
    Ensure a 4D shape, to use with 4D symbolic functions.
    Args:
        a: an int or a tuple/list of length 2
    Returns:
        list: of length 4. if ``a`` is an int, return ``[1, a, a, 1]``
or ``[1, 1, a, a]`` depending on data_format.
"""
s2d = shape2d(a)
if get_data_format(data_format, False) == 'NHWC':
return [1] + s2d + [1]
else:
return [1, 1] + s2d
@memoized
def log_once(message, func='info'):
"""
    Log a certain message only once. Calling this function more than once with
    the same message will result in a no-op.
Args:
message(str): message to log
func(str): the name of the logger method. e.g. "info", "warn", "error".
"""
getattr(logger, func)(message)
def call_only_once(func):
"""
Decorate a method or property of a class, so that this method can only
be called once for every instance.
    Calling it more than once will result in an exception.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
# cannot use hasattr here, because hasattr tries to getattr, which
# fails if func is a property
assert func.__name__ in dir(self), "call_only_once can only be used on method or property!"
if not hasattr(self, '_CALL_ONLY_ONCE_CACHE'):
cache = self._CALL_ONLY_ONCE_CACHE = set()
else:
cache = self._CALL_ONLY_ONCE_CACHE
cls = type(self)
# cannot use ismethod(), because decorated method becomes a function
is_method = inspect.isfunction(getattr(cls, func.__name__))
assert func not in cache, \
"{} {}.{} can only be called once per object!".format(
'Method' if is_method else 'Property',
cls.__name__, func.__name__)
cache.add(func)
return func(*args, **kwargs)
return wrapper
def memoized_method(func):
"""
A decorator that performs memoization on methods. It stores the cache on the object instance itself.
"""
@functools.wraps(func)
def wrapper(*args, **kwargs):
self = args[0]
assert func.__name__ in dir(self), "memoized_method can only be used on method!"
if not hasattr(self, '_MEMOIZED_CACHE'):
cache = self._MEMOIZED_CACHE = {}
else:
cache = self._MEMOIZED_CACHE
key = (func, ) + args[1:] + tuple(kwargs)
ret = cache.get(key, None)
if ret is not None:
return ret
value = func(*args, **kwargs)
cache[key] = value
return value
return wrapper
if __name__ == '__main__':
class A():
def __init__(self):
self._p = 0
@call_only_once
def f(self, x):
print(x)
@property
def p(self):
return self._p
@p.setter
@call_only_once
def p(self, val):
self._p = val
a = A()
a.f(1)
b = A()
b.f(2)
b.f(1)
print(b.p)
print(b.p)
b.p = 2
print(b.p)
b.p = 3
print(b.p)
| 5,918 | 24.734783 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/palette.py | # -*- coding: utf-8 -*-
# File: palette.py
import numpy as np
__all__ = ['PALETTE_RGB']
# Copied from https://stackoverflow.com/questions/2328339/how-to-generate-n-different-colors-for-any-natural-number-n
PALETTE_HEX = [
"#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059",
"#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87",
"#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80",
"#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100",
"#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F",
"#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09",
"#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66",
"#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C",
"#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81",
"#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00",
"#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700",
"#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329",
"#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C",
"#83AB58", "#001C1E", "#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800",
"#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4", "#1E0200", "#5B4E51",
"#C895C5", "#320033", "#FF6832", "#66E1D3", "#CFCDAC", "#D0AC94",
"#7ED379", "#012C58"]
# Copied from https://github.com/facebookresearch/Detectron/blob/master/detectron/utils/colormap.py
DETECTRON_PALETTE = np.array(
[
0.000, 0.447, 0.741,
0.850, 0.325, 0.098,
0.929, 0.694, 0.125,
0.494, 0.184, 0.556,
0.466, 0.674, 0.188,
0.301, 0.745, 0.933,
0.635, 0.078, 0.184,
0.300, 0.300, 0.300,
0.600, 0.600, 0.600,
1.000, 0.000, 0.000,
1.000, 0.500, 0.000,
0.749, 0.749, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 1.000,
0.667, 0.000, 1.000,
0.333, 0.333, 0.000,
0.333, 0.667, 0.000,
0.333, 1.000, 0.000,
0.667, 0.333, 0.000,
0.667, 0.667, 0.000,
0.667, 1.000, 0.000,
1.000, 0.333, 0.000,
1.000, 0.667, 0.000,
1.000, 1.000, 0.000,
0.000, 0.333, 0.500,
0.000, 0.667, 0.500,
0.000, 1.000, 0.500,
0.333, 0.000, 0.500,
0.333, 0.333, 0.500,
0.333, 0.667, 0.500,
0.333, 1.000, 0.500,
0.667, 0.000, 0.500,
0.667, 0.333, 0.500,
0.667, 0.667, 0.500,
0.667, 1.000, 0.500,
1.000, 0.000, 0.500,
1.000, 0.333, 0.500,
1.000, 0.667, 0.500,
1.000, 1.000, 0.500,
0.000, 0.333, 1.000,
0.000, 0.667, 1.000,
0.000, 1.000, 1.000,
0.333, 0.000, 1.000,
0.333, 0.333, 1.000,
0.333, 0.667, 1.000,
0.333, 1.000, 1.000,
0.667, 0.000, 1.000,
0.667, 0.333, 1.000,
0.667, 0.667, 1.000,
0.667, 1.000, 1.000,
1.000, 0.000, 1.000,
1.000, 0.333, 1.000,
1.000, 0.667, 1.000,
0.167, 0.000, 0.000,
0.333, 0.000, 0.000,
0.500, 0.000, 0.000,
0.667, 0.000, 0.000,
0.833, 0.000, 0.000,
1.000, 0.000, 0.000,
0.000, 0.167, 0.000,
0.000, 0.333, 0.000,
0.000, 0.500, 0.000,
0.000, 0.667, 0.000,
0.000, 0.833, 0.000,
0.000, 1.000, 0.000,
0.000, 0.000, 0.167,
0.000, 0.000, 0.333,
0.000, 0.000, 0.500,
0.000, 0.000, 0.667,
0.000, 0.000, 0.833,
0.000, 0.000, 1.000,
0.000, 0.000, 0.000,
0.143, 0.143, 0.143,
0.286, 0.286, 0.286,
0.429, 0.429, 0.429,
0.571, 0.571, 0.571,
0.714, 0.714, 0.714,
0.857, 0.857, 0.857,
1.000, 1.000, 1.000
]
).astype(np.float32).reshape(-1, 3) * 255
def _parse_hex_color(s):
r = int(s[1:3], 16)
g = int(s[3:5], 16)
b = int(s[5:7], 16)
return (r, g, b)
# PALETTE_RGB = np.asarray(
# list(map(_parse_hex_color, PALETTE_HEX)),
# dtype='int32')
# This seems more beautiful
PALETTE_RGB = DETECTRON_PALETTE
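if __name__ == '__main__':
    # Illustrative sketch added for documentation purposes (not part of the
    # original file): pick a distinct color per class id, wrapping around when
    # there are more classes than palette entries.
    for class_id in range(3):
        color = PALETTE_RGB[class_id % len(PALETTE_RGB)]
        print(class_id, color)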
| 4,457 | 33.828125 | 117 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/fs.py | # -*- coding: utf-8 -*-
# File: fs.py
import errno
import os
import tqdm
from six.moves import urllib
from . import logger
from .utils import execute_only_once
__all__ = ['mkdir_p', 'download', 'recursive_walk', 'get_dataset_path', 'normpath']
def mkdir_p(dirname):
""" Like "mkdir -p", make a dir recursively, but do nothing if the dir exists
Args:
dirname(str):
"""
assert dirname is not None
if dirname == '' or os.path.isdir(dirname):
return
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise e
def download(url, dir, filename=None, expect_size=None):
"""
Download URL to a directory.
Will figure out the filename automatically from URL, if not given.
"""
mkdir_p(dir)
if filename is None:
filename = url.split('/')[-1]
fpath = os.path.join(dir, filename)
if os.path.isfile(fpath):
if expect_size is not None and os.stat(fpath).st_size == expect_size:
logger.info("File {} exists! Skip download.".format(filename))
return fpath
else:
logger.warn("File {} exists. Will overwrite with a new download!".format(filename))
def hook(t):
last_b = [0]
def inner(b, bsize, tsize=None):
if tsize is not None:
t.total = tsize
t.update((b - last_b[0]) * bsize)
last_b[0] = b
return inner
try:
with tqdm.tqdm(unit='B', unit_scale=True, miniters=1, desc=filename) as t:
fpath, _ = urllib.request.urlretrieve(url, fpath, reporthook=hook(t))
statinfo = os.stat(fpath)
size = statinfo.st_size
except IOError:
logger.error("Failed to download {}".format(url))
raise
assert size > 0, "Downloaded an empty file from {}!".format(url)
if expect_size is not None and size != expect_size:
logger.error("File downloaded from {} does not match the expected size!".format(url))
logger.error("You may have downloaded a broken file, or the upstream may have modified the file.")
# TODO human-readable size
    logger.info('Successfully downloaded ' + filename + ". " + str(size) + ' bytes.')
return fpath
def recursive_walk(rootdir):
"""
Yields:
str: All files in rootdir, recursively.
"""
for r, dirs, files in os.walk(rootdir):
for f in files:
yield os.path.join(r, f)
def get_dataset_path(*args):
"""
Get the path to some dataset under ``$TENSORPACK_DATASET``.
Args:
args: strings to be joined to form path.
Returns:
str: path to the dataset.
"""
d = os.environ.get('TENSORPACK_DATASET', None)
if d is None:
d = os.path.join(os.path.expanduser('~'), 'tensorpack_data')
if execute_only_once():
logger.warn("Env var $TENSORPACK_DATASET not set, using {} for datasets.".format(d))
if not os.path.isdir(d):
mkdir_p(d)
logger.info("Created the directory {}.".format(d))
assert os.path.isdir(d), d
return os.path.join(d, *args)
def normpath(path):
"""
    Normalizes a path to a folder, taking into account remote storage locations
    (e.g. cloud storage) referenced by '://' at the beginning of the path.
    Args:
        path: path to be normalized.
Returns:
str: normalized path.
"""
return path if '://' in path else os.path.normpath(path)
if __name__ == '__main__':
download('http://dl.caffe.berkeleyvision.org/caffe_ilsvrc12.tar.gz', '.')
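    # Illustrative addition (not part of the original file): normpath leaves
    # remote paths containing '://' untouched and normalizes local ones.
    print(normpath('s3://bucket/data/../data'))  # unchanged
    print(normpath('./data/../data'))            # 'data'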
| 3,592 | 27.744 | 106 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/stats.py | # -*- coding: utf-8 -*-
# File: stats.py
import numpy as np
__all__ = ['StatCounter', 'BinaryStatistics', 'RatioCounter', 'Accuracy',
'OnlineMoments']
class StatCounter(object):
""" A simple counter"""
def __init__(self):
self.reset()
def feed(self, v):
"""
Args:
v(float or np.ndarray): has to be the same shape between calls.
"""
self._values.append(v)
def reset(self):
self._values = []
@property
def count(self):
return len(self._values)
@property
def average(self):
assert len(self._values)
return np.mean(self._values)
@property
def sum(self):
assert len(self._values)
return np.sum(self._values)
@property
def max(self):
assert len(self._values)
return max(self._values)
@property
def min(self):
assert len(self._values)
return min(self._values)
def samples(self):
"""
Returns all samples.
"""
return self._values
class RatioCounter(object):
""" A counter to count ratio of something. """
def __init__(self):
self.reset()
def reset(self):
self._tot = 0
self._cnt = 0
def feed(self, count, total=1):
"""
Args:
            count(int): the count of some event of interest.
            total(int): the total number of events.
"""
self._tot += total
self._cnt += count
@property
def ratio(self):
if self._tot == 0:
return 0
return self._cnt * 1.0 / self._tot
@property
def total(self):
"""
Returns:
int: the total
"""
return self._tot
@property
def count(self):
"""
Returns:
            int: the count
"""
return self._cnt
class Accuracy(RatioCounter):
""" A RatioCounter with a fancy name """
@property
def accuracy(self):
return self.ratio
class BinaryStatistics(object):
"""
Statistics for binary decision,
including precision, recall, false positive, false negative
"""
def __init__(self):
self.reset()
def reset(self):
self.nr_pos = 0 # positive label
self.nr_neg = 0 # negative label
self.nr_pred_pos = 0
self.nr_pred_neg = 0
self.corr_pos = 0 # correct predict positive
self.corr_neg = 0 # correct predict negative
def feed(self, pred, label):
"""
Args:
pred (np.ndarray): binary array.
label (np.ndarray): binary array of the same size.
"""
assert pred.shape == label.shape, "{} != {}".format(pred.shape, label.shape)
self.nr_pos += (label == 1).sum()
self.nr_neg += (label == 0).sum()
self.nr_pred_pos += (pred == 1).sum()
self.nr_pred_neg += (pred == 0).sum()
self.corr_pos += ((pred == 1) & (pred == label)).sum()
self.corr_neg += ((pred == 0) & (pred == label)).sum()
@property
def precision(self):
if self.nr_pred_pos == 0:
return 0
return self.corr_pos * 1. / self.nr_pred_pos
@property
def recall(self):
if self.nr_pos == 0:
return 0
return self.corr_pos * 1. / self.nr_pos
@property
def false_positive(self):
if self.nr_pred_pos == 0:
return 0
return 1 - self.precision
@property
def false_negative(self):
if self.nr_pos == 0:
return 0
return 1 - self.recall
class OnlineMoments(object):
"""Compute 1st and 2nd moments online (to avoid storing all elements).
See algorithm at: https://www.wikiwand.com/en/Algorithms_for_calculating_variance#/Online_algorithm
"""
def __init__(self):
self._mean = 0
self._M2 = 0
self._n = 0
def feed(self, x):
"""
Args:
x (float or np.ndarray): must have the same shape.
"""
self._n += 1
delta = x - self._mean
self._mean += delta * (1.0 / self._n)
delta2 = x - self._mean
self._M2 += delta * delta2
@property
def mean(self):
return self._mean
@property
def variance(self):
return self._M2 / (self._n - 1)
@property
def std(self):
return np.sqrt(self.variance)
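if __name__ == '__main__':
    # Illustrative sketch added for documentation purposes (not part of the
    # original file): the online moments agree with numpy's batch statistics
    # (note that `variance` here is the unbiased sample variance, ddof=1).
    data = [1.0, 2.0, 3.0, 4.0]
    om = OnlineMoments()
    for v in data:
        om.feed(v)
    print(om.mean, om.variance)                 # 2.5 1.666...
    print(np.mean(data), np.var(data, ddof=1))  # 2.5 1.666...
    stat = BinaryStatistics()
    stat.feed(np.array([1, 0, 1, 1]), np.array([1, 0, 0, 1]))
    print(stat.precision, stat.recall)          # 0.666..., 1.0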
| 4,416 | 21.651282 | 103 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/develop.py | # -*- coding: utf-8 -*-
# File: develop.py
# Author: tensorpack contributors
""" Utilities for developers only.
These are not visible to users (not automatically imported). And should not
appear in docs."""
import functools
import importlib
import os
import types
from collections import defaultdict
from datetime import datetime
import six
from . import logger
__all__ = []
def create_dummy_class(klass, dependency):
"""
When a dependency of a class is not available, create a dummy class which throws ImportError when used.
Args:
klass (str): name of the class.
dependency (str): name of the dependency.
Returns:
class: a class object
"""
assert not building_rtfd()
class _DummyMetaClass(type):
# throw error on class attribute access
def __getattr__(_, __):
raise AttributeError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))
@six.add_metaclass(_DummyMetaClass)
class _Dummy(object):
# throw error on constructor
def __init__(self, *args, **kwargs):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, klass))
return _Dummy
def create_dummy_func(func, dependency):
"""
When a dependency of a function is not available, create a dummy function which throws ImportError when used.
Args:
func (str): name of the function.
dependency (str or list[str]): name(s) of the dependency.
Returns:
function: a function object
"""
assert not building_rtfd()
if isinstance(dependency, (list, tuple)):
dependency = ','.join(dependency)
def _dummy(*args, **kwargs):
raise ImportError("Cannot import '{}', therefore '{}' is not available".format(dependency, func))
return _dummy
def building_rtfd():
"""
Returns:
bool: if the library is being imported to generate docs now.
"""
return os.environ.get('READTHEDOCS') == 'True' \
or os.environ.get('DOC_BUILDING')
_DEPRECATED_LOG_NUM = defaultdict(int)
def log_deprecated(name="", text="", eos="", max_num_warnings=None):
"""
Log deprecation warning.
Args:
name (str): name of the deprecated item.
text (str, optional): information about the deprecation.
eos (str, optional): end of service date such as "YYYY-MM-DD".
max_num_warnings (int, optional): the maximum number of times to print this warning
"""
assert name or text
if eos:
eos = "after " + datetime(*map(int, eos.split("-"))).strftime("%d %b")
if name:
if eos:
warn_msg = "%s will be deprecated %s. %s" % (name, eos, text)
else:
warn_msg = "%s was deprecated. %s" % (name, text)
else:
warn_msg = text
if eos:
warn_msg += " Legacy period ends %s" % eos
if max_num_warnings is not None:
if _DEPRECATED_LOG_NUM[warn_msg] >= max_num_warnings:
return
_DEPRECATED_LOG_NUM[warn_msg] += 1
logger.warn("[Deprecated] " + warn_msg)
def deprecated(text="", eos="", max_num_warnings=None):
"""
Args:
text, eos, max_num_warnings: same as :func:`log_deprecated`.
Returns:
a decorator which deprecates the function.
Example:
.. code-block:: python
@deprecated("Explanation of what to do instead.", "2017-11-4")
def foo(...):
pass
"""
def get_location():
import inspect
frame = inspect.currentframe()
if frame:
callstack = inspect.getouterframes(frame)[-1]
return '%s:%i' % (callstack[1], callstack[2])
else:
stack = inspect.stack(0)
entry = stack[2]
return '%s:%i' % (entry[1], entry[2])
def deprecated_inner(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
name = "{} [{}]".format(func.__name__, get_location())
log_deprecated(name, text, eos, max_num_warnings=max_num_warnings)
return func(*args, **kwargs)
return new_func
return deprecated_inner
def HIDE_DOC(func):
func.__HIDE_SPHINX_DOC__ = True
return func
# Copied from https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/util/lazy_loader.py
class LazyLoader(types.ModuleType):
def __init__(self, local_name, parent_module_globals, name):
self._local_name = local_name
self._parent_module_globals = parent_module_globals
super(LazyLoader, self).__init__(name)
def _load(self):
# Import the target module and insert it into the parent's namespace
module = importlib.import_module(self.__name__)
self._parent_module_globals[self._local_name] = module
# Update this object's dict so that if someone keeps a reference to the
# LazyLoader, lookups are efficient (__getattr__ is only called on lookups
# that fail).
self.__dict__.update(module.__dict__)
return module
def __getattr__(self, item):
module = self._load()
return getattr(module, item)
def __dir__(self):
module = self._load()
return dir(module)
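if __name__ == '__main__':
    # Illustrative sketch added for documentation purposes (not part of the
    # original file): a dummy class raises ImportError only when actually used.
    Dummy = create_dummy_class('SomeClass', 'some_missing_package')
    try:
        Dummy()
    except ImportError as e:
        print(e)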
| 5,284 | 28.361111 | 113 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/utils.py | # -*- coding: utf-8 -*-
# File: utils.py
import inspect
import numpy as np
import re
import os
import sys
from contextlib import contextmanager
from datetime import datetime, timedelta
from tqdm import tqdm
from . import logger
from .concurrency import subproc_call
__all__ = ['change_env',
'get_rng',
'fix_rng_seed',
'get_tqdm',
'execute_only_once',
'humanize_time_delta'
]
def humanize_time_delta(sec):
"""Humanize timedelta given in seconds
Args:
sec (float): time difference in seconds. Must be positive.
Returns:
str - time difference as a readable string
Example:
.. code-block:: python
print(humanize_time_delta(1)) # 1 second
print(humanize_time_delta(60 + 1)) # 1 minute 1 second
print(humanize_time_delta(87.6)) # 1 minute 27 seconds
print(humanize_time_delta(0.01)) # 0.01 seconds
print(humanize_time_delta(60 * 60 + 1)) # 1 hour 1 second
print(humanize_time_delta(60 * 60 * 24 + 1)) # 1 day 1 second
print(humanize_time_delta(60 * 60 * 24 + 60 * 2 + 60*60*9 + 3)) # 1 day 9 hours 2 minutes 3 seconds
"""
if sec < 0:
logger.warn("humanize_time_delta() obtains negative seconds!")
return "{:.3g} seconds".format(sec)
if sec == 0:
return "0 second"
time = datetime(2000, 1, 1) + timedelta(seconds=int(sec))
units = ['day', 'hour', 'minute', 'second']
vals = [int(sec // 86400), time.hour, time.minute, time.second]
if sec < 60:
vals[-1] = sec
def _format(v, u):
return "{:.3g} {}{}".format(v, u, "s" if v > 1 else "")
ans = []
for v, u in zip(vals, units):
if v > 0:
ans.append(_format(v, u))
return " ".join(ans)
@contextmanager
def change_env(name, val):
"""
Args:
name(str): name of the env var
val(str or None): the value, or set to None to clear the env var.
Returns:
a context where the environment variable ``name`` being set to
``val``. It will be set back after the context exits.
"""
oldval = os.environ.get(name, None)
if val is None:
try:
del os.environ[name]
except KeyError:
pass
else:
os.environ[name] = val
yield
if oldval is None:
try:
del os.environ[name]
except KeyError:
pass
else:
os.environ[name] = oldval
_RNG_SEED = None
def fix_rng_seed(seed):
"""
    Call this function at the beginning of the program to fix the rng seed within tensorpack.
Args:
seed (int):
Note:
See https://github.com/tensorpack/tensorpack/issues/196.
Example:
Fix random seed in both tensorpack and tensorflow.
.. code-block:: python
seed = 42
utils.fix_rng_seed(seed)
            tensorflow.set_random_seed(seed)
# run trainer
"""
global _RNG_SEED
_RNG_SEED = int(seed)
def get_rng(obj=None):
"""
Get a good RNG seeded with time, pid and the object.
Args:
obj: some object to use to generate random seed.
Returns:
np.random.RandomState: the RNG.
"""
seed = (id(obj) + os.getpid() +
int(datetime.now().strftime("%Y%m%d%H%M%S%f"))) % 4294967295
if _RNG_SEED is not None:
seed = _RNG_SEED
return np.random.RandomState(seed)
_EXECUTE_HISTORY = set()
def execute_only_once():
"""
    Each call site of this function is guaranteed to return True the
    first time it is reached and False afterwards.
Returns:
bool: whether this is the first time this function gets called from this line of code.
Example:
.. code-block:: python
if execute_only_once():
# do something only once
"""
f = inspect.currentframe().f_back
ident = (f.f_code.co_filename, f.f_lineno)
if ident in _EXECUTE_HISTORY:
return False
_EXECUTE_HISTORY.add(ident)
return True
def _pick_tqdm_interval(file):
    # Heuristics to pick an update interval for the progress bar that's nice-looking for users.
isatty = file.isatty()
# Jupyter notebook should be recognized as tty.
# Wait for https://github.com/ipython/ipykernel/issues/268
try:
from ipykernel import iostream
if isinstance(file, iostream.OutStream):
isatty = True
except ImportError:
pass
if isatty:
return 0.5
else:
# When run under mpirun/slurm, isatty is always False.
# Here we apply some hacky heuristics for slurm.
if 'SLURM_JOB_ID' in os.environ:
if int(os.environ.get('SLURM_JOB_NUM_NODES', 1)) > 1:
# multi-machine job, probably not interactive
return 60
else:
# possibly interactive, so let's be conservative
return 15
if 'OMPI_COMM_WORLD_SIZE' in os.environ:
return 60
# If not a tty, don't refresh progress bar that often
return 180
def get_tqdm_kwargs(**kwargs):
"""
Return default arguments to be used with tqdm.
Args:
kwargs: extra arguments to be used.
Returns:
dict:
"""
default = dict(
smoothing=0.5,
dynamic_ncols=True,
ascii=True,
bar_format='{l_bar}{bar}|{n_fmt}/{total_fmt}[{elapsed}<{remaining},{rate_noinv_fmt}]'
)
try:
# Use this env var to override the refresh interval setting
interval = float(os.environ['TENSORPACK_PROGRESS_REFRESH'])
except KeyError:
interval = _pick_tqdm_interval(kwargs.get('file', sys.stderr))
default['mininterval'] = interval
default.update(kwargs)
return default
def get_tqdm(*args, **kwargs):
""" Similar to :func:`tqdm.tqdm()`,
but use tensorpack's default options to have consistent style. """
return tqdm(*args, **get_tqdm_kwargs(**kwargs))
def find_library_full_path(name):
"""
Similar to `from ctypes.util import find_library`, but try
to return full path if possible.
"""
from ctypes.util import find_library
if os.name == "posix" and sys.platform == "darwin":
# on Mac, ctypes already returns full path
return find_library(name)
def _use_proc_maps(name):
"""
Find so from /proc/pid/maps
Only works with libraries that has already been loaded.
But this is the most accurate method -- it finds the exact library that's being used.
"""
procmap = os.path.join('/proc', str(os.getpid()), 'maps')
if not os.path.isfile(procmap):
return None
try:
with open(procmap, 'r') as f:
for line in f:
line = line.strip().split(' ')
sofile = line[-1]
basename = os.path.basename(sofile)
if 'lib' + name + '.so' in basename:
if os.path.isfile(sofile):
return os.path.realpath(sofile)
except IOError:
# can fail in certain environment (e.g. chroot)
# if the pids are incorrectly mapped
pass
# The following two methods come from https://github.com/python/cpython/blob/master/Lib/ctypes/util.py
def _use_ld(name):
"""
Find so with `ld -lname -Lpath`.
It will search for files in LD_LIBRARY_PATH, but not in ldconfig.
"""
cmd = "ld -t -l{} -o {}".format(name, os.devnull)
ld_lib_path = os.environ.get('LD_LIBRARY_PATH', '')
for d in ld_lib_path.split(':'):
cmd = cmd + " -L " + d
result, ret = subproc_call(cmd + '|| true')
expr = r'[^\(\)\s]*lib%s\.[^\(\)\s]*' % re.escape(name)
res = re.search(expr, result.decode('utf-8'))
if res:
res = res.group(0)
if not os.path.isfile(res):
return None
return os.path.realpath(res)
def _use_ldconfig(name):
"""
Find so in `ldconfig -p`.
It does not handle LD_LIBRARY_PATH.
"""
with change_env('LC_ALL', 'C'), change_env('LANG', 'C'):
ldconfig, ret = subproc_call("ldconfig -p")
ldconfig = ldconfig.decode('utf-8')
if ret != 0:
return None
expr = r'\s+(lib%s\.[^\s]+)\s+\(.*=>\s+(.*)' % (re.escape(name))
res = re.search(expr, ldconfig)
if not res:
return None
else:
ret = res.group(2)
return os.path.realpath(ret)
if sys.platform.startswith('linux'):
return _use_proc_maps(name) or _use_ld(name) or _use_ldconfig(name) or find_library(name)
return find_library(name) # don't know what to do
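if __name__ == '__main__':
    # Illustrative sketch added for documentation purposes (not part of the
    # original file): humanize_time_delta and change_env in action.
    print(humanize_time_delta(60 * 60 * 24 + 60 * 2 + 3))  # 1 day 2 minutes 3 seconds
    with change_env('TENSORPACK_DEMO_VAR', '1'):
        assert os.environ['TENSORPACK_DEMO_VAR'] == '1'
    assert 'TENSORPACK_DEMO_VAR' not in os.environ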
| 9,003 | 28.045161 | 107 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/logger.py | # -*- coding: utf-8 -*-
# File: logger.py
"""
The logger module itself has the common logging functions of Python's
:class:`logging.Logger`. For example:
.. code-block:: python
from tensorpack.utils import logger
logger.set_logger_dir('train_log/test')
logger.info("Test")
logger.error("Error happened!")
"""
import logging
import os
import os.path
import shutil
import sys
from datetime import datetime
from six.moves import input
from termcolor import colored
__all__ = ['set_logger_dir', 'auto_set_dir', 'get_logger_dir']
class _MyFormatter(logging.Formatter):
def format(self, record):
date = colored('[%(asctime)s @%(filename)s:%(lineno)d]', 'green')
msg = '%(message)s'
if record.levelno == logging.WARNING:
fmt = date + ' ' + colored('WRN', 'red', attrs=['blink']) + ' ' + msg
elif record.levelno == logging.ERROR or record.levelno == logging.CRITICAL:
fmt = date + ' ' + colored('ERR', 'red', attrs=['blink', 'underline']) + ' ' + msg
elif record.levelno == logging.DEBUG:
fmt = date + ' ' + colored('DBG', 'yellow', attrs=['blink']) + ' ' + msg
else:
fmt = date + ' ' + msg
if hasattr(self, '_style'):
# Python3 compatibility
self._style._fmt = fmt
self._fmt = fmt
return super(_MyFormatter, self).format(record)
def _getlogger():
# this file is synced to "dataflow" package as well
package_name = "dataflow" if __name__.startswith("dataflow") else "tensorpack"
logger = logging.getLogger(package_name)
logger.propagate = False
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
logger.addHandler(handler)
return logger
_logger = _getlogger()
_LOGGING_METHOD = ['info', 'warning', 'error', 'critical', 'exception', 'debug', 'setLevel', 'addFilter']
# export logger functions
for func in _LOGGING_METHOD:
locals()[func] = getattr(_logger, func)
__all__.append(func)
# 'warn' is deprecated in logging module
warn = _logger.warning
__all__.append('warn')
def _get_time_str():
return datetime.now().strftime('%m%d-%H%M%S')
# globals: logger file and directory:
LOG_DIR = None
_FILE_HANDLER = None
def _set_file(path):
global _FILE_HANDLER
if os.path.isfile(path):
backup_name = path + '.' + _get_time_str()
shutil.move(path, backup_name)
_logger.info("Existing log file '{}' backuped to '{}'".format(path, backup_name)) # noqa: F821
hdl = logging.FileHandler(
filename=path, encoding='utf-8', mode='w')
hdl.setFormatter(_MyFormatter(datefmt='%m%d %H:%M:%S'))
_FILE_HANDLER = hdl
_logger.addHandler(hdl)
_logger.info("Argv: " + ' '.join(sys.argv))
def set_logger_dir(dirname, action=None):
"""
Set the directory for global logging.
Args:
dirname(str): log directory
action(str): an action of ["k","d","q"] to be performed
when the directory exists. Will ask user by default.
"d": delete the directory. Note that the deletion may fail when
the directory is used by tensorboard.
"k": keep the directory. This is useful when you resume from a
previous training and want the directory to look as if the
training was not interrupted.
Note that this option does not load old models or any other
old states for you. It simply does nothing.
"""
dirname = os.path.normpath(dirname)
global LOG_DIR, _FILE_HANDLER
if _FILE_HANDLER:
# unload and close the old file handler, so that we may safely delete the logger directory
_logger.removeHandler(_FILE_HANDLER)
del _FILE_HANDLER
def dir_nonempty(dirname):
# If directory exists and nonempty (ignore hidden files), prompt for action
return os.path.isdir(dirname) and len([x for x in os.listdir(dirname) if x[0] != '.'])
if dir_nonempty(dirname):
if not action:
_logger.warning("""\
Log directory {} exists! Use 'd' to delete it. """.format(dirname))
_logger.warning("""\
If you're resuming from a previous run, you can choose to keep it.
Press any other key to exit. """)
while not action:
action = input("Select Action: k (keep) / d (delete) / q (quit):").lower().strip()
act = action
if act == 'b':
backup_name = dirname + _get_time_str()
shutil.move(dirname, backup_name)
info("Directory '{}' backuped to '{}'".format(dirname, backup_name)) # noqa: F821
elif act == 'd':
shutil.rmtree(dirname, ignore_errors=True)
if dir_nonempty(dirname):
shutil.rmtree(dirname, ignore_errors=False)
elif act == 'n':
dirname = dirname + _get_time_str()
info("Use a new log directory {}".format(dirname)) # noqa: F821
elif act == 'k':
pass
else:
raise OSError("Directory {} exits!".format(dirname))
LOG_DIR = dirname
from .fs import mkdir_p
mkdir_p(dirname)
_set_file(os.path.join(dirname, 'log.log'))
def auto_set_dir(action=None, name=None):
"""
Use :func:`logger.set_logger_dir` to set log directory to
"./train_log/{scriptname}:{name}". "scriptname" is the name of the main python file currently running"""
mod = sys.modules['__main__']
basename = os.path.basename(mod.__file__)
auto_dirname = os.path.join('train_log', basename[:basename.rfind('.')])
if name:
auto_dirname += '_%s' % name if os.name == 'nt' else ':%s' % name
set_logger_dir(auto_dirname, action=action)
def get_logger_dir():
"""
Returns:
The logger directory, or None if not set.
The directory is used for general logging, tensorboard events, checkpoints, etc.
"""
return LOG_DIR
| 5,996 | 33.268571 | 108 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/loadcaffe.py | # -*- coding: utf-8 -*-
# File: loadcaffe.py
import numpy as np
import os
import sys
from . import logger
from .concurrency import subproc_call
from .fs import download, get_dataset_path
from .utils import change_env
__all__ = ['load_caffe', 'get_caffe_pb']
CAFFE_PROTO_URL = "https://github.com/BVLC/caffe/raw/master/src/caffe/proto/caffe.proto"
class CaffeLayerProcessor(object):
def __init__(self, net):
self.net = net
self.layer_names = net._layer_names
self.param_dict = {}
self.processors = {
'Convolution': self.proc_conv,
'InnerProduct': self.proc_fc,
'BatchNorm': self.proc_bn,
'Scale': self.proc_scale
}
def process(self):
for idx, layer in enumerate(self.net.layers):
param = layer.blobs
name = self.layer_names[idx]
if layer.type in self.processors:
logger.info("Processing layer {} of type {}".format(
name, layer.type))
dic = self.processors[layer.type](idx, name, param)
self.param_dict.update(dic)
elif len(layer.blobs) != 0:
logger.warn(
"{} layer contains parameters but is not supported!".format(layer.type))
return self.param_dict
def proc_conv(self, idx, name, param):
assert len(param) <= 2
assert param[0].data.ndim == 4
# caffe: ch_out, ch_in, h, w
W = param[0].data.transpose(2, 3, 1, 0)
if len(param) == 1:
return {name + '/W': W}
else:
return {name + '/W': W,
name + '/b': param[1].data}
def proc_fc(self, idx, name, param):
        # TODO caffe has a 'transpose' option for fc/W
assert len(param) == 2
prev_layer_name = self.net.bottom_names[name][0]
prev_layer_output = self.net.blobs[prev_layer_name].data
if prev_layer_output.ndim == 4:
logger.info("FC layer {} takes spatial data.".format(name))
W = param[0].data
# original: outx(CxHxW)
W = W.reshape((-1,) + prev_layer_output.shape[1:]).transpose(2, 3, 1, 0)
# become: (HxWxC)xout
else:
W = param[0].data.transpose()
return {name + '/W': W,
name + '/b': param[1].data}
def proc_bn(self, idx, name, param):
scale_factor = param[2].data[0]
return {name + '/mean/EMA': param[0].data / scale_factor,
name + '/variance/EMA': param[1].data / scale_factor}
def proc_scale(self, idx, name, param):
bottom_name = self.net.bottom_names[name][0]
# find the bn layer before this scaling
for i, layer in enumerate(self.net.layers):
if layer.type == 'BatchNorm':
name2 = self.layer_names[i]
bottom_name2 = self.net.bottom_names[name2][0]
if bottom_name2 == bottom_name:
# scaling and BN share the same bottom, should merge
logger.info("Merge {} and {} into one BatchNorm layer".format(
name, name2))
return {name2 + '/beta': param[1].data,
name2 + '/gamma': param[0].data}
# assume this scaling layer is part of some BN
logger.error("Could not find a BN layer corresponding to this Scale layer!")
raise ValueError()
def load_caffe(model_desc, model_file):
"""
Load a caffe model. You must be able to ``import caffe`` to use this
function.
Args:
model_desc (str): path to caffe model description file (.prototxt).
model_file (str): path to caffe model parameter file (.caffemodel).
Returns:
dict: the parameters.
"""
with change_env('GLOG_minloglevel', '2'):
import caffe
caffe.set_mode_cpu()
net = caffe.Net(model_desc, model_file, caffe.TEST)
param_dict = CaffeLayerProcessor(net).process()
logger.info("Model loaded from caffe. Params: " +
", ".join(sorted(param_dict.keys())))
return param_dict
def get_caffe_pb():
"""
Get caffe protobuf.
Returns:
The imported caffe protobuf module.
"""
dir = get_dataset_path('caffe')
caffe_pb_file = os.path.join(dir, 'caffe_pb2.py')
if not os.path.isfile(caffe_pb_file):
download(CAFFE_PROTO_URL, dir)
assert os.path.isfile(os.path.join(dir, 'caffe.proto'))
cmd = "protoc --version"
version, ret = subproc_call(cmd, timeout=3)
if ret != 0:
sys.exit(1)
try:
version = version.decode('utf-8')
version = float('.'.join(version.split(' ')[1].split('.')[:2]))
assert version >= 2.7, "Require protoc>=2.7 for Python3"
except Exception:
logger.exception("protoc --version gives: " + str(version))
raise
cmd = 'cd {} && protoc caffe.proto --python_out .'.format(dir)
ret = os.system(cmd)
assert ret == 0, \
"Command `{}` failed!".format(cmd)
assert os.path.isfile(caffe_pb_file), caffe_pb_file
import imp
return imp.load_source('caffepb', caffe_pb_file)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('model', help='.prototxt file')
parser.add_argument('weights', help='.caffemodel file')
parser.add_argument('output', help='output npz file')
args = parser.parse_args()
ret = load_caffe(args.model, args.weights)
if args.output.endswith('.npz'):
np.savez_compressed(args.output, **ret)
elif args.output.endswith('.npy'):
logger.warn("Please use npz format instead!")
np.save(args.output, ret)
else:
raise ValueError("Unknown format {}".format(args.output))
| 5,887 | 34.257485 | 92 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/compatible_serialize.py | from .serialize import loads, dumps # noqa
# keep this file for BC
| 69 | 16.5 | 43 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/viz.py | # -*- coding: utf-8 -*-
# File: viz.py
# Credit: zxytim
import numpy as np
import os
import sys
from ..utils.develop import create_dummy_func # noqa
from .argtools import shape2d
from .fs import mkdir_p
try:
import cv2
except ImportError:
pass
__all__ = ['interactive_imshow',
'stack_patches', 'gen_stack_patches',
'dump_dataflow_images', 'intensity_to_rgb',
'draw_boxes']
def interactive_imshow(img, lclick_cb=None, rclick_cb=None, **kwargs):
"""
Args:
img (np.ndarray): an image (expect BGR) to show.
lclick_cb, rclick_cb: a callback ``func(img, x, y)`` for left/right click event.
kwargs: can be {key_cb_a: callback_img, key_cb_b: callback_img}, to
specify a callback ``func(img)`` for keypress.
Some existing keypress event handler:
* q: destroy the current window
* x: execute ``sys.exit()``
* s: save image to "out.png"
"""
name = 'tensorpack_viz_window'
cv2.imshow(name, img)
def mouse_cb(event, x, y, *args):
if event == cv2.EVENT_LBUTTONUP and lclick_cb is not None:
lclick_cb(img, x, y)
elif event == cv2.EVENT_RBUTTONUP and rclick_cb is not None:
rclick_cb(img, x, y)
cv2.setMouseCallback(name, mouse_cb)
key = cv2.waitKey(-1)
while key >= 128:
key = cv2.waitKey(-1)
key = chr(key & 0xff)
cb_name = 'key_cb_' + key
if cb_name in kwargs:
kwargs[cb_name](img)
elif key == 'q':
cv2.destroyWindow(name)
elif key == 'x':
sys.exit()
elif key == 's':
cv2.imwrite('out.png', img)
elif key in ['+', '=']:
img = cv2.resize(img, None, fx=1.3, fy=1.3, interpolation=cv2.INTER_CUBIC)
interactive_imshow(img, lclick_cb, rclick_cb, **kwargs)
elif key == '-':
img = cv2.resize(img, None, fx=0.7, fy=0.7, interpolation=cv2.INTER_CUBIC)
interactive_imshow(img, lclick_cb, rclick_cb, **kwargs)
def _preprocess_patch_list(plist):
plist = np.asarray(plist)
    assert plist.dtype != object
if plist.ndim == 3:
plist = plist[:, :, :, np.newaxis]
assert plist.ndim == 4 and plist.shape[3] in [1, 3], plist.shape
return plist
def _pad_patch_list(plist, bgcolor):
if isinstance(bgcolor, int):
bgcolor = (bgcolor, bgcolor, bgcolor)
def _pad_channel(plist):
ret = []
for p in plist:
if len(p.shape) == 2:
p = p[:, :, np.newaxis]
if p.shape[2] == 1:
p = np.repeat(p, 3, 2)
ret.append(p)
return ret
plist = _pad_channel(plist)
shapes = [x.shape for x in plist]
ph = max(s[0] for s in shapes)
pw = max(s[1] for s in shapes)
ret = np.zeros((len(plist), ph, pw, 3), dtype=plist[0].dtype)
ret[:, :, :] = bgcolor
for idx, p in enumerate(plist):
s = p.shape
sh = (ph - s[0]) // 2
sw = (pw - s[1]) // 2
ret[idx, sh:sh + s[0], sw:sw + s[1], :] = p
return ret
class Canvas(object):
def __init__(self, ph, pw,
nr_row, nr_col,
channel, border, bgcolor):
self.ph = ph
self.pw = pw
self.nr_row = nr_row
self.nr_col = nr_col
if border is None:
border = int(0.05 * min(ph, pw))
self.border = border
if isinstance(bgcolor, int):
bgchannel = 1
else:
bgchannel = 3
self.bgcolor = bgcolor
self.channel = max(channel, bgchannel)
self.canvas = np.zeros((nr_row * (ph + border) - border,
nr_col * (pw + border) - border,
self.channel), dtype='uint8')
def draw_patches(self, plist):
assert self.nr_row * self.nr_col >= len(plist), \
"{}*{} < {}".format(self.nr_row, self.nr_col, len(plist))
if self.channel == 3 and plist.shape[3] == 1:
plist = np.repeat(plist, 3, axis=3)
cur_row, cur_col = 0, 0
if self.channel == 1:
self.canvas.fill(self.bgcolor)
else:
self.canvas[:, :, :] = self.bgcolor
for patch in plist:
r0 = cur_row * (self.ph + self.border)
c0 = cur_col * (self.pw + self.border)
self.canvas[r0:r0 + self.ph, c0:c0 + self.pw] = patch
cur_col += 1
if cur_col == self.nr_col:
cur_col = 0
cur_row += 1
def get_patchid_from_coord(self, x, y):
x = x // (self.pw + self.border)
y = y // (self.pw + self.border)
idx = y * self.nr_col + x
return idx
def stack_patches(
patch_list, nr_row, nr_col, border=None,
pad=False, bgcolor=255, viz=False, lclick_cb=None):
"""
Stacked patches into grid, to produce visualizations like the following:
.. image:: https://github.com/tensorpack/tensorpack/raw/master/examples/GAN/demo/BEGAN-CelebA-samples.jpg
Args:
patch_list(list[ndarray] or ndarray): NHW or NHWC images in [0,255].
nr_row(int), nr_col(int): rows and cols of the grid.
``nr_col * nr_row`` must be no less than ``len(patch_list)``.
border(int): border length between images.
Defaults to ``0.05 * min(patch_width, patch_height)``.
pad (boolean): when `patch_list` is a list, pad all patches to the maximum height and width.
This option allows stacking patches of different shapes together.
bgcolor(int or 3-tuple): background color in [0, 255]. Either an int
or a BGR tuple.
viz(bool): whether to use :func:`interactive_imshow` to visualize the results.
lclick_cb: A callback function ``f(patch, patch index in patch_list)``
to get called when a patch get clicked in imshow.
Returns:
np.ndarray: the stacked image.
"""
if pad:
patch_list = _pad_patch_list(patch_list, bgcolor)
patch_list = _preprocess_patch_list(patch_list)
if lclick_cb is not None:
viz = True
ph, pw = patch_list.shape[1:3]
canvas = Canvas(ph, pw, nr_row, nr_col,
patch_list.shape[-1], border, bgcolor)
if lclick_cb is not None:
def lclick_callback(img, x, y):
idx = canvas.get_patchid_from_coord(x, y)
lclick_cb(patch_list[idx], idx)
else:
lclick_callback = None
canvas.draw_patches(patch_list)
if viz:
interactive_imshow(canvas.canvas, lclick_cb=lclick_callback)
return canvas.canvas
def gen_stack_patches(patch_list,
nr_row=None, nr_col=None, border=None,
max_width=1000, max_height=1000,
bgcolor=255, viz=False, lclick_cb=None):
"""
Similar to :func:`stack_patches` but with a generator interface.
It takes a much-longer list and yields stacked results one by one.
For example, if ``patch_list`` contains 1000 images and ``nr_row==nr_col==10``,
this generator yields 10 stacked images.
Args:
nr_row(int), nr_col(int): rows and cols of each result.
max_width(int), max_height(int): Maximum allowed size of the
stacked image. If ``nr_row/nr_col`` are None, this number
will be used to infer the rows and cols. Otherwise the option is
ignored.
patch_list, border, viz, lclick_cb: same as in :func:`stack_patches`.
Yields:
np.ndarray: the stacked image.
"""
# setup parameters
patch_list = _preprocess_patch_list(patch_list)
if lclick_cb is not None:
viz = True
ph, pw = patch_list.shape[1:3]
if border is None:
border = int(0.05 * min(ph, pw))
if nr_row is None:
nr_row = int(max_height / (ph + border))
if nr_col is None:
nr_col = int(max_width / (pw + border))
canvas = Canvas(ph, pw, nr_row, nr_col, patch_list.shape[-1], border, bgcolor)
nr_patch = nr_row * nr_col
start = 0
if lclick_cb is not None:
def lclick_callback(img, x, y):
idx = canvas.get_patchid_from_coord(x, y)
idx = idx + start
if idx < end:
lclick_cb(patch_list[idx], idx)
else:
lclick_callback = None
while True:
end = start + nr_patch
cur_list = patch_list[start:end]
if not len(cur_list):
return
canvas.draw_patches(cur_list)
if viz:
interactive_imshow(canvas.canvas, lclick_cb=lclick_callback)
yield canvas.canvas
start = end
def dump_dataflow_images(df, index=0, batched=True,
number=1000, output_dir=None,
scale=1, resize=None, viz=None,
flipRGB=False):
"""
Dump or visualize images of a :class:`DataFlow`.
Args:
df (DataFlow): the DataFlow.
index (int): the index of the image component.
batched (bool): whether the component contains batched images (NHW or
NHWC) or not (HW or HWC).
number (int): how many datapoint to take from the DataFlow.
output_dir (str): output directory to save images, default to not save.
scale (float): scale the value, usually either 1 or 255.
resize (tuple or None): tuple of (h, w) to resize the images to.
viz (tuple or None): tuple of (h, w) determining the grid size to use
with :func:`gen_stack_patches` for visualization. No visualization will happen by
default.
flipRGB (bool): apply a RGB<->BGR conversion or not.
"""
if output_dir:
mkdir_p(output_dir)
if viz is not None:
viz = shape2d(viz)
vizsize = viz[0] * viz[1]
if resize is not None:
resize = tuple(shape2d(resize))
vizlist = []
df.reset_state()
cnt = 0
while True:
for dp in df:
if not batched:
imgbatch = [dp[index]]
else:
imgbatch = dp[index]
for img in imgbatch:
cnt += 1
if cnt == number:
return
if scale != 1:
img = img * scale
if resize is not None:
img = cv2.resize(img, resize)
if flipRGB:
img = img[:, :, ::-1]
if output_dir:
fname = os.path.join(output_dir, '{:03d}.jpg'.format(cnt))
cv2.imwrite(fname, img)
if viz is not None:
vizlist.append(img)
if viz is not None and len(vizlist) >= vizsize:
stack_patches(
vizlist[:vizsize],
nr_row=viz[0], nr_col=viz[1], viz=True)
vizlist = vizlist[vizsize:]
def intensity_to_rgb(intensity, cmap='cubehelix', normalize=False):
"""
Convert a 1-channel matrix of intensities to an RGB image employing a colormap.
This function requires matplotlib. See `matplotlib colormaps
<http://matplotlib.org/examples/color/colormaps_reference.html>`_ for a
list of available colormap.
Args:
intensity (np.ndarray): array of intensities such as saliency.
cmap (str): name of the colormap to use.
normalize (bool): if True, will normalize the intensity so that it has
minimum 0 and maximum 1.
Returns:
np.ndarray: an RGB float32 image in range [0, 255], a colored heatmap.
"""
assert intensity.ndim == 2, intensity.shape
intensity = intensity.astype("float")
if normalize:
intensity -= intensity.min()
intensity /= intensity.max()
cmap = plt.get_cmap(cmap)
intensity = cmap(intensity)[..., :3]
return intensity.astype('float32') * 255.0
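# --- Editor's note: hedged example, not part of the original file. ---
# Blending a saliency map over an image as a heatmap. Requires matplotlib (see
# the fallback at the bottom of this file); `_example_saliency_overlay` and its
# blending weights are illustrative assumptions, not tensorpack API.
def _example_saliency_overlay(img, saliency):
    # img: HxWx3 BGR uint8 image; saliency: HxW float array of arbitrary range
    heatmap = intensity_to_rgb(saliency, cmap='jet', normalize=True)  # RGB float32 in [0, 255]
    heatmap = heatmap[:, :, ::-1]  # RGB -> BGR to match the cv2-style input image
    blended = 0.5 * img.astype('float32') + 0.5 * heatmap
    return blended.astype('uint8')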
def draw_text(img, pos, text, color, font_scale=0.4):
"""
Draw text on an image.
Args:
pos (tuple): x, y; the position of the text
text (str):
font_scale (float):
color (tuple): a 3-tuple BGR color in [0, 255]
"""
img = img.astype(np.uint8)
x0, y0 = int(pos[0]), int(pos[1])
# Compute text size.
font = cv2.FONT_HERSHEY_SIMPLEX
((text_w, text_h), _) = cv2.getTextSize(text, font, font_scale, 1)
# Place text background.
if x0 + text_w > img.shape[1]:
x0 = img.shape[1] - text_w
if y0 - int(1.15 * text_h) < 0:
y0 = int(1.15 * text_h)
back_topleft = x0, y0 - int(1.3 * text_h)
back_bottomright = x0 + text_w, y0
cv2.rectangle(img, back_topleft, back_bottomright, color, -1)
# Show text.
text_bottomleft = x0, y0 - int(0.25 * text_h)
cv2.putText(img, text, text_bottomleft, font, font_scale, (222, 222, 222), lineType=cv2.LINE_AA)
return img
def draw_boxes(im, boxes, labels=None, color=None):
"""
Args:
im (np.ndarray): a BGR image in range [0,255]. It will not be modified.
boxes (np.ndarray): a numpy array of shape Nx4 where each row is [x1, y1, x2, y2].
labels: (list[str] or None)
color: a 3-tuple BGR color (in range [0, 255])
Returns:
np.ndarray: a new image.
"""
boxes = np.asarray(boxes, dtype='int32')
if labels is not None:
assert len(labels) == len(boxes), "{} != {}".format(len(labels), len(boxes))
areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
sorted_inds = np.argsort(-areas) # draw large ones first
assert areas.min() > 0, areas.min()
# allow equal, because we are not very strict about rounding error here
assert boxes[:, 0].min() >= 0 and boxes[:, 1].min() >= 0 \
and boxes[:, 2].max() <= im.shape[1] and boxes[:, 3].max() <= im.shape[0], \
"Image shape: {}\n Boxes:\n{}".format(str(im.shape), str(boxes))
im = im.copy()
if color is None:
color = (15, 128, 15)
if im.ndim == 2 or (im.ndim == 3 and im.shape[2] == 1):
im = cv2.cvtColor(im, cv2.COLOR_GRAY2BGR)
for i in sorted_inds:
box = boxes[i, :]
if labels is not None:
im = draw_text(im, (box[0], box[1]), labels[i], color=color)
cv2.rectangle(im, (box[0], box[1]), (box[2], box[3]),
color=color, thickness=1)
return im
try:
import matplotlib.pyplot as plt
except (ImportError, RuntimeError):
intensity_to_rgb = create_dummy_func('intensity_to_rgb', 'matplotlib') # noqa
if __name__ == '__main__':
if False:
imglist = []
for i in range(100):
fname = "{:03d}.png".format(i)
imglist.append(cv2.imread(fname))
for idx, patch in enumerate(gen_stack_patches(
imglist, max_width=500, max_height=200)):
of = "patch{:02d}.png".format(idx)
cv2.imwrite(of, patch)
if False:
imglist = []
img = cv2.imread('out.png')
img2 = cv2.resize(img, (300, 300))
viz = stack_patches([img, img2], 1, 2, pad=True, viz=True)
if False:
img = cv2.imread('cat.jpg')
boxes = np.asarray([
[10, 30, 200, 100],
[20, 80, 250, 250]
])
img = draw_boxes(img, boxes, ['asdfasdf', '11111111111111'])
interactive_imshow(img)
| 15,256 | 33.131991 | 109 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
"""
Common utils.
These utils should be irrelevant to tensorflow.
"""
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .utils import *
__all__ = []
def _global_import(name):
p = __import__(name, globals(), None, level=1)
lst = p.__all__ if '__all__' in dir(p) else dir(p)
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
_global_import('utils')
# Import no other submodules. They are supposed to be explicitly imported by users.
__all__.extend(['logger'])
| 745 | 22.3125 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/nvml.py | # -*- coding: utf-8 -*-
# File: nvml.py
import threading
from ctypes import (
CDLL, POINTER, Structure, byref, c_uint,
c_ulonglong, create_string_buffer)
__all__ = ['NVMLContext']
NVML_ERROR_FUNCTION_NOT_FOUND = 13
NvmlErrorCodes = {"0": "NVML_SUCCESS",
"1": "NVML_ERROR_UNINITIALIZED",
"2": "NVML_ERROR_INVALID_ARGUMENT",
"3": "NVML_ERROR_NOT_SUPPORTED",
"4": "NVML_ERROR_NO_PERMISSION",
"5": "NVML_ERROR_ALREADY_INITIALIZED",
"6": "NVML_ERROR_NOT_FOUND",
"7": "NVML_ERROR_INSUFFICIENT_SIZE",
"8": "NVML_ERROR_INSUFFICIENT_POWER",
"9": "NVML_ERROR_DRIVER_NOT_LOADED",
"10": "NVML_ERROR_TIMEOUT",
"11": "NVML_ERROR_IRQ_ISSUE",
"12": "NVML_ERROR_LIBRARY_NOT_FOUND",
"13": "NVML_ERROR_FUNCTION_NOT_FOUND",
"14": "NVML_ERROR_CORRUPTED_INFOROM",
"15": "NVML_ERROR_GPU_IS_LOST",
"16": "NVML_ERROR_RESET_REQUIRED",
"17": "NVML_ERROR_OPERATING_SYSTEM",
"18": "NVML_ERROR_LIB_RM_VERSION_MISMATCH",
"999": "NVML_ERROR_UNKNOWN"}
class NvmlException(Exception):
def __init__(self, error_code):
super(NvmlException, self).__init__(error_code)
self.error_code = error_code
def __str__(self):
return NvmlErrorCodes[str(self.error_code)]
def _check_return(ret):
if (ret != 0):
raise NvmlException(ret)
return ret
class NVML(object):
"""
Loader for libnvidia-ml.so
"""
_nvmlLib = None
_lib_lock = threading.Lock()
def load(self):
with self._lib_lock:
if self._nvmlLib is None:
self._nvmlLib = CDLL("libnvidia-ml.so.1")
function_pointers = ["nvmlDeviceGetName", "nvmlDeviceGetUUID", "nvmlDeviceGetMemoryInfo",
"nvmlDeviceGetUtilizationRates", "nvmlInit_v2", "nvmlShutdown",
"nvmlDeviceGetCount_v2", "nvmlDeviceGetHandleByIndex_v2"]
self.func_ptr = {n: self._function_pointer(n) for n in function_pointers}
def _function_pointer(self, name):
try:
return getattr(self._nvmlLib, name)
except AttributeError:
raise NvmlException(NVML_ERROR_FUNCTION_NOT_FOUND)
def get_function(self, name):
if name in self.func_ptr.keys():
return self.func_ptr[name]
_NVML = NVML()
class NvidiaDevice(object):
"""Represent a single GPUDevice"""
def __init__(self, hnd):
super(NvidiaDevice, self).__init__()
self.hnd = hnd
def memory(self):
"""Memory information in bytes
Example:
>>> print(ctx.device(0).memory())
{'total': 4238016512L, 'used': 434831360L, 'free': 3803185152L}
Returns:
total/used/free memory in bytes
"""
class GpuMemoryInfo(Structure):
_fields_ = [
('total', c_ulonglong),
('free', c_ulonglong),
('used', c_ulonglong),
]
c_memory = GpuMemoryInfo()
_check_return(_NVML.get_function(
"nvmlDeviceGetMemoryInfo")(self.hnd, byref(c_memory)))
return {'total': c_memory.total, 'free': c_memory.free, 'used': c_memory.used}
def utilization(self):
"""Percent of time over the past second was utilized.
Details:
Percent of time over the past second during which one or more kernels was executing on the GPU.
Percent of time over the past second during which global (device) memory was being read or written
Example:
>>> print(ctx.device(0).utilization())
{'gpu': 4L, 'memory': 6L}
"""
class GpuUtilizationInfo(Structure):
_fields_ = [
('gpu', c_uint),
('memory', c_uint),
]
c_util = GpuUtilizationInfo()
_check_return(_NVML.get_function(
"nvmlDeviceGetUtilizationRates")(self.hnd, byref(c_util)))
return {'gpu': c_util.gpu, 'memory': c_util.memory}
def name(self):
buflen = 1024
buf = create_string_buffer(buflen)
fn = _NVML.get_function("nvmlDeviceGetName")
ret = fn(self.hnd, buf, c_uint(1024))
_check_return(ret)
return buf.value.decode('utf-8')
class NVMLContext(object):
"""Creates a context to query information
Example:
with NVMLContext() as ctx:
num_gpus = ctx.num_devices()
for device in ctx.devices():
print(device.memory())
print(device.utilization())
"""
def __enter__(self):
"""Create a new context """
_NVML.load()
_check_return(_NVML.get_function("nvmlInit_v2")())
return self
def __exit__(self, type, value, tb):
"""Destroy current context"""
_check_return(_NVML.get_function("nvmlShutdown")())
def num_devices(self):
"""Get number of devices """
c_count = c_uint()
_check_return(_NVML.get_function(
"nvmlDeviceGetCount_v2")(byref(c_count)))
return c_count.value
def devices(self):
"""
Returns:
[NvidiaDevice]: a list of devices
"""
return [self.device(i) for i in range(self.num_devices())]
def device(self, idx):
"""Get a specific GPU device
Args:
idx: index of device
Returns:
NvidiaDevice: single GPU device
"""
class GpuDevice(Structure):
pass
c_nvmlDevice_t = POINTER(GpuDevice)
c_index = c_uint(idx)
device = c_nvmlDevice_t()
_check_return(_NVML.get_function(
"nvmlDeviceGetHandleByIndex_v2")(c_index, byref(device)))
return NvidiaDevice(device)
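# --- Editor's note: hedged example, not part of the original file. ---
# Picking the device with the most free memory, e.g. before pinning a process
# to one GPU. Assumes a working NVIDIA driver exposing libnvidia-ml.so.1;
# `_example_pick_freest_gpu` is a hypothetical helper for illustration only.
def _example_pick_freest_gpu():
    with NVMLContext() as ctx:
        free_mem = [dev.memory()['free'] for dev in ctx.devices()]
    return max(range(len(free_mem)), key=lambda i: free_mem[i])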
if __name__ == '__main__':
with NVMLContext() as ctx:
for idx, dev in enumerate(ctx.devices()):
print(idx, dev.name())
with NVMLContext() as ctx:
print(ctx.devices())
print(ctx.devices()[0].utilization())
| 6,281 | 28.218605 | 109 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/debug.py | # -*- coding: utf-8 -*-
# File: debug.py
import sys
def enable_call_trace():
""" Enable trace for calls to any function. """
def tracer(frame, event, arg):
if event == 'call':
co = frame.f_code
func_name = co.co_name
if func_name == 'write' or func_name == 'print':
# ignore write() calls from print statements
return
func_line_no = frame.f_lineno
func_filename = co.co_filename
caller = frame.f_back
if caller:
caller_line_no = caller.f_lineno
caller_filename = caller.f_code.co_filename
print('Call to `%s` on line %s:%s from %s:%s' %
(func_name, func_filename, func_line_no,
caller_filename, caller_line_no))
return
sys.settrace(tracer)
if __name__ == '__main__':
enable_call_trace()
def b(a):
print(2)
def a():
print(1)
b(1)
a()
| 1,023 | 23.97561 | 63 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/serialize.py | # -*- coding: utf-8 -*-
# File: serialize.py
import os
import pickle
from multiprocessing.reduction import ForkingPickler
import msgpack
import msgpack_numpy
msgpack_numpy.patch()
assert msgpack.version >= (0, 5, 2)
__all__ = ['loads', 'dumps']
MAX_MSGPACK_LEN = 1000000000
class MsgpackSerializer(object):
@staticmethod
def dumps(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object.
"""
return msgpack.dumps(obj, use_bin_type=True)
@staticmethod
def loads(buf):
"""
Args:
buf: the output of `dumps`.
"""
# Since 0.6, the default max size was set to 1MB.
# We change it to approximately 1G.
return msgpack.loads(buf, raw=False,
max_bin_len=MAX_MSGPACK_LEN,
max_array_len=MAX_MSGPACK_LEN,
max_map_len=MAX_MSGPACK_LEN,
max_str_len=MAX_MSGPACK_LEN)
class PyarrowSerializer(object):
@staticmethod
def dumps(obj):
"""
Serialize an object.
Returns:
Implementation-dependent bytes-like object.
May not be compatible across different versions of pyarrow.
"""
import pyarrow as pa
return pa.serialize(obj).to_buffer()
@staticmethod
def dumps_bytes(obj):
"""
Returns:
bytes
"""
return PyarrowSerializer.dumps(obj).to_pybytes()
@staticmethod
def loads(buf):
"""
Args:
buf: the output of `dumps` or `dumps_bytes`.
"""
import pyarrow as pa
return pa.deserialize(buf)
class PickleSerializer(object):
@staticmethod
def dumps(obj):
"""
Returns:
bytes
"""
return pickle.dumps(obj, protocol=-1)
@staticmethod
def loads(buf):
"""
Args:
            buf: the output of `dumps`.
"""
return pickle.loads(buf)
# Define the default serializer to be used that dumps data to bytes
_DEFAULT_S = os.environ.get('TENSORPACK_SERIALIZE', 'pickle')
if _DEFAULT_S == "pyarrow":
dumps = PyarrowSerializer.dumps_bytes
loads = PyarrowSerializer.loads
elif _DEFAULT_S == "pickle":
dumps = PickleSerializer.dumps
loads = PickleSerializer.loads
else:
dumps = MsgpackSerializer.dumps
loads = MsgpackSerializer.loads
# Define the default serializer to be used for passing data
# among a pair of peers. In this case the deserialization is
# known to happen only once
_DEFAULT_S = os.environ.get('TENSORPACK_ONCE_SERIALIZE', 'pickle')
if _DEFAULT_S == "pyarrow":
dumps_once = PyarrowSerializer.dumps
loads_once = PyarrowSerializer.loads
elif _DEFAULT_S == "pickle":
dumps_once = ForkingPickler.dumps
loads_once = ForkingPickler.loads
else:
dumps_once = MsgpackSerializer.dumps
loads_once = MsgpackSerializer.loads
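# --- Editor's note: hedged example, not part of the original file. ---
# Round-trip through the module-level `dumps`/`loads` pair selected above.
# With the msgpack backend, numpy arrays survive thanks to msgpack_numpy.patch();
# the default pickle backend handles arbitrary picklable objects.
def _example_roundtrip():
    import numpy as np
    payload = {"step": 3, "weights": np.arange(6, dtype="float32").reshape(2, 3)}
    buf = dumps(payload)      # a bytes-like object
    restored = loads(buf)
    assert restored["step"] == 3
    return restored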
| 2,979 | 23.227642 | 71 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/naming.py | # -*- coding: utf-8 -*-
# File: naming.py
GLOBAL_STEP_INCR_OP_NAME = 'global_step_incr'
# extra variables to summarize during training in a moving-average way
MOVING_SUMMARY_OPS_KEY = 'MOVING_SUMMARY_OPS'
| 208 | 22.222222 | 70 | py |
SyNet | SyNet-master/tensorpack/tensorpack/utils/gpu.py | # -*- coding: utf-8 -*-
# File: gpu.py
import os
from . import logger
from .concurrency import subproc_call
from .nvml import NVMLContext
from .utils import change_env
__all__ = ['change_gpu', 'get_nr_gpu', 'get_num_gpu']
def change_gpu(val):
"""
Args:
val: an integer, the index of the GPU or -1 to disable GPU.
Returns:
a context where ``CUDA_VISIBLE_DEVICES=val``.
"""
val = str(val)
if val == '-1':
val = ''
return change_env('CUDA_VISIBLE_DEVICES', val)
def get_num_gpu():
"""
Returns:
int: #available GPUs in CUDA_VISIBLE_DEVICES, or in the system.
"""
def warn_return(ret, message):
try:
import tensorflow as tf
except ImportError:
return ret
built_with_cuda = tf.test.is_built_with_cuda()
if not built_with_cuda and ret > 0:
logger.warn(message + "But TensorFlow was not built with CUDA support and could not use GPUs!")
return ret
env = os.environ.get('CUDA_VISIBLE_DEVICES', None)
if env:
return warn_return(len(env.split(',')), "Found non-empty CUDA_VISIBLE_DEVICES. ")
output, code = subproc_call("nvidia-smi -L", timeout=5)
if code == 0:
output = output.decode('utf-8')
return warn_return(len(output.strip().split('\n')), "Found nvidia-smi. ")
try:
# Use NVML to query device properties
with NVMLContext() as ctx:
return warn_return(ctx.num_devices(), "NVML found nvidia devices. ")
except Exception:
# Fallback
logger.info("Loading local devices by TensorFlow ...")
try:
import tensorflow as tf
# available since TF 1.14
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
except AttributeError:
from tensorflow.python.client import device_lib
local_device_protos = device_lib.list_local_devices()
# Note this will initialize all GPUs and therefore has side effect
# https://github.com/tensorflow/tensorflow/issues/8136
gpu_devices = [x.name for x in local_device_protos if x.device_type == 'GPU']
return len(gpu_devices)
get_nr_gpu = get_num_gpu
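# --- Editor's note: hedged example, not part of the original file. ---
# Querying visible GPUs and restricting a block of code to one of them.
# The device index 0 below is only an illustration.
def _example_query_and_pin_gpu():
    n = get_num_gpu()     # respects CUDA_VISIBLE_DEVICES when it is set
    with change_gpu(0):   # the block below runs with CUDA_VISIBLE_DEVICES=0
        pass              # e.g. build the graph / create the session here
    return n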
| 2,254 | 29.066667 | 107 | py |
SyNet | SyNet-master/tensorpack/tensorpack/contrib/keras.py | # -*- coding: utf-8 -*-
# File: keras.py
from contextlib import contextmanager
import six
import tensorflow as tf
from tensorflow import keras
from ..callbacks import Callback, CallbackToHook, InferenceRunner, InferenceRunnerBase, ScalarStats
from ..models.regularize import regularize_cost_from_collection
from ..tfutils.collection import backup_collection, restore_collection
from ..tfutils.common import get_op_tensor_name
from ..tfutils.scope_utils import cached_name_scope
from ..tfutils.summary import add_moving_summary
from ..tfutils.tower import get_current_tower_context
from ..train import SimpleTrainer, SyncMultiGPUTrainerParameterServer, Trainer
from ..train.interface import apply_default_prefetch
from ..train.trainers import DistributedTrainerBase
from ..utils import logger
from ..utils.gpu import get_nr_gpu
__all__ = ['KerasPhaseCallback', 'setup_keras_trainer', 'KerasModel']
TOTAL_LOSS_NAME = 'total_loss'
def _check_name(tensor, name):
tensorname = get_op_tensor_name(tensor.name)[0]
assert tensorname.split('/')[-1] == name, \
"{} does not match {}, you may have name conflict somewhere!".format(tensor.name, name)
class KerasModelCaller(object):
"""
    Keras models don't support variable scope reuse.
    This is a wrapper around a Keras model to mimic reuse.
"""
def __init__(self, get_model):
self.get_model = get_model
self.cached_model = None
def __call__(self, *input_tensors):
"""
Args:
input_tensors ([tf.Tensor])
Returns:
output tensors of this tower, evaluated with the input tensors.
"""
reuse = tf.get_variable_scope().reuse
old_trainable_names = {x.name for x in tf.trainable_variables()}
trainable_backup = backup_collection([tf.GraphKeys.TRAINABLE_VARIABLES])
update_ops_backup = backup_collection([tf.GraphKeys.UPDATE_OPS])
def post_process_model(model):
added_trainable_names = {x.name for x in tf.trainable_variables()}
restore_collection(trainable_backup)
for v in model.weights:
# In Keras, the collection is not respected and could contain non-trainable vars.
# We put M.weights into the collection instead.
if v.name not in old_trainable_names and v.name in added_trainable_names:
tf.add_to_collection(tf.GraphKeys.TRAINABLE_VARIABLES, v)
new_trainable_names = {x.name for x in tf.trainable_variables()}
for n in added_trainable_names:
if n not in new_trainable_names:
logger.warn("Keras created trainable variable '{}' which is actually not trainable. "
"This was automatically corrected.".format(n))
# Keras models might not use this collection at all (in some versions).
# This is a BC-breaking change of tf.keras: https://github.com/tensorflow/tensorflow/issues/19643
restore_collection(update_ops_backup)
for op in model.updates:
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, op)
if self.cached_model is None:
assert not reuse
# starting from some versions, tf.keras starts to prepend name scope to variable names ..
@contextmanager
def clear_tower0_name_scope():
ns = tf.get_default_graph().get_name_scope()
if ns == 'tower0':
with tf.name_scope('/'):
yield
else:
yield
with clear_tower0_name_scope():
model = self.cached_model = self.get_model(*input_tensors)
assert isinstance(model, keras.Model), \
"Your get_model function should return a `tf.keras.Model`!"
outputs = model.outputs
elif reuse:
# use the cached Keras model to mimic reuse
# NOTE: ctx.is_training won't be useful inside model,
# because inference will always use the cached Keras model
model = self.cached_model
outputs = model.call(*input_tensors)
else:
# create new Keras model if not reuse
model = self.get_model(*input_tensors)
outputs = model.outputs
post_process_model(model)
if isinstance(outputs, list) and len(outputs) == 1:
return outputs[0]
return outputs
class KerasPhaseCallback(Callback):
"""
Keras needs an extra input if learning_phase is used by the model
This callback will be used:
1. By the trainer with isTrain=True
2. By InferenceRunner with isTrain=False, in the form of hooks
If you use :class:`KerasModel` or :func:`setup_keras_trainer`,
this callback will be automatically added when needed.
"""
def __init__(self, isTrain):
assert isinstance(isTrain, bool), isTrain
self._isTrain = isTrain
self._learning_phase = keras.backend.learning_phase()
def _setup_graph(self):
logger.info("Using Keras learning phase {} in the graph!".format(
self._learning_phase.name))
cbs = self.trainer._callbacks.cbs
for cb in cbs:
# XXX HACK
if isinstance(cb, InferenceRunnerBase):
h = CallbackToHook(KerasPhaseCallback(False))
cb.register_hook(h)
def _before_run(self, ctx):
return tf.train.SessionRunArgs(
fetches=[], feed_dict={self._learning_phase: int(self._isTrain)})
def setup_keras_trainer(
trainer, get_model,
input_signature, target_signature,
input, optimizer, loss, metrics):
"""
Args:
trainer (SingleCostTrainer):
get_model (input1, input2, ... -> tf.keras.Model):
A function which takes tensors, builds and returns a Keras model.
It will be part of the tower function.
input (InputSource):
optimizer (tf.train.Optimizer):
loss, metrics: list of strings
"""
assert isinstance(optimizer, tf.train.Optimizer), optimizer
assert isinstance(loss, list), loss
assert len(loss) >= 1, "No loss was given!"
assert isinstance(metrics, list), metrics
model_caller = KerasModelCaller(get_model)
nr_inputs = len(input_signature)
def get_cost(*inputs):
ctx = get_current_tower_context()
input_tensors = list(inputs[:nr_inputs])
target_tensors = list(inputs[nr_inputs:])
# TODO mapping between target tensors & output tensors
outputs = model_caller(*input_tensors)
if isinstance(outputs, tf.Tensor):
outputs = [outputs]
assert len(outputs) == len(target_tensors), \
"len({}) != len({})".format(str(outputs), str(target_tensors))
assert len(outputs) == len(loss), \
"len({}) != len({})".format(str(outputs), str(loss))
loss_tensors = []
for idx, loss_name in enumerate(loss):
with cached_name_scope('keras_loss', top_level=False):
loss_fn = keras.losses.get(loss_name)
curr_loss = loss_fn(target_tensors[idx], outputs[idx])
curr_loss = tf.reduce_mean(curr_loss, name=loss_name)
_check_name(curr_loss, loss_name)
loss_tensors.append(curr_loss)
loss_reg = regularize_cost_from_collection()
if loss_reg is not None:
total_loss = tf.add_n(loss_tensors + [loss_reg], name=TOTAL_LOSS_NAME)
add_moving_summary(loss_reg, total_loss, *loss_tensors)
else:
total_loss = tf.add_n(loss_tensors, name=TOTAL_LOSS_NAME)
add_moving_summary(total_loss, *loss_tensors)
if metrics and (ctx.is_main_training_tower or not ctx.is_training):
# for list: one metric for each output
metric_tensors = []
for oid, metric_name in enumerate(metrics):
output_tensor = outputs[oid]
target_tensor = target_tensors[oid] # TODO may not have the same mapping?
with cached_name_scope('keras_metric', top_level=False):
metric_fn = keras.metrics.get(metric_name)
metric_tensor = metric_fn(target_tensor, output_tensor)
metric_tensor = tf.reduce_mean(metric_tensor, name=metric_name)
_check_name(metric_tensor, metric_name)
# check name conflict here
metric_tensors.append(metric_tensor)
add_moving_summary(*metric_tensors)
return total_loss
trainer.setup_graph(
input_signature + target_signature,
input,
get_cost,
lambda: optimizer)
if isinstance(keras.backend.learning_phase(), tf.Tensor) and len(keras.backend.learning_phase().consumers()) > 0:
# check if learning_phase is used in this model
trainer.register_callback(KerasPhaseCallback(True))
class KerasModel(object):
def __init__(self, get_model, input_signature=None, target_signature=None,
input=None, trainer=None):
"""
Args:
get_model (input1, input2, ... -> keras.Model):
A function which takes tensors, builds and returns a Keras model.
It will be part of the tower function.
input_signature ([tf.TensorSpec]): required. The signature for inputs.
target_signature ([tf.TensorSpec]): required. The signature for the targets tensors.
input (InputSource | DataFlow): the InputSource or DataFlow where the input data comes from.
trainer (Trainer): the default will check the number of available GPUs and use them all.
"""
self.get_model = get_model
assert callable(get_model), get_model
self.input_signature = input_signature
self.target_signature = target_signature
if trainer is None:
nr_gpu = get_nr_gpu()
if nr_gpu <= 1:
trainer = SimpleTrainer()
else:
# the default multi-gpu trainer
trainer = SyncMultiGPUTrainerParameterServer(nr_gpu)
assert isinstance(trainer, Trainer), trainer
assert not isinstance(trainer, DistributedTrainerBase)
assert input is not None, "Argument 'input' is required!"
self.input = apply_default_prefetch(input, trainer)
self.trainer = trainer
def compile(self, optimizer, loss, metrics=None):
"""
Args:
optimizer (tf.train.Optimizer):
loss, metrics: string or list of strings
"""
if isinstance(loss, six.string_types):
loss = [loss]
if metrics is None:
metrics = []
if isinstance(metrics, six.string_types):
metrics = [metrics]
self._stats_to_inference = loss + metrics + [TOTAL_LOSS_NAME]
setup_keras_trainer(
self.trainer, get_model=self.get_model,
input_signature=self.input_signature,
target_signature=self.target_signature,
input=self.input,
optimizer=optimizer,
loss=loss,
metrics=metrics)
def fit(self, validation_data=None, **kwargs):
"""
Args:
validation_data (DataFlow or InputSource): to be used for inference.
The inference callback is added as the first in the callback list.
If you need to use it in a different order, please write it in the callback list manually.
kwargs: same arguments as :meth:`Trainer.train_with_defaults`.
"""
callbacks = kwargs.pop('callbacks', [])
if validation_data is not None:
# There is no way to guess where users want this callback. So we have to choose one.
# MinSaver may need results from this callback,
# so we put this callback at first.
callbacks.insert(0, InferenceRunner(
validation_data, ScalarStats(self._stats_to_inference)))
self.trainer.train_with_defaults(callbacks=callbacks, **kwargs)
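# --- Editor's note: hedged usage sketch, not part of the original file. ---
# How the pieces above are typically wired together. `build_keras_model` and
# `train_df`/`val_df` are hypothetical user objects: the former must return a
# tf.keras.Model built from the input tensors it receives, the latter are
# DataFlows yielding [image, label] points matching the signatures.
#
#   M = KerasModel(
#       build_keras_model,
#       input_signature=[tf.TensorSpec([None, 28, 28, 1], tf.float32, 'images')],
#       target_signature=[tf.TensorSpec([None, 10], tf.float32, 'labels')],
#       input=train_df)    # default trainer uses all available GPUs
#   M.compile(optimizer=tf.train.AdamOptimizer(1e-3),
#             loss='categorical_crossentropy', metrics='accuracy')
#   M.fit(validation_data=val_df, steps_per_epoch=len(train_df))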
| 12,196 | 40.345763 | 117 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/base.py | # -*- coding: utf-8 -*-
# File: base.py
from abc import ABCMeta, abstractmethod
import six
import tensorflow as tf
from ..input_source import PlaceholderInput
from ..tfutils.common import get_tensors_by_names, get_op_tensor_name
from ..tfutils.tower import PredictTowerContext
__all__ = ['PredictorBase',
'OnlinePredictor', 'OfflinePredictor']
@six.add_metaclass(ABCMeta)
class PredictorBase(object):
"""
Base class for all predictors.
Attributes:
return_input (bool): whether the call will also return (inputs, outputs)
or just outputs
"""
def __call__(self, *dp):
"""
Call the predictor on some inputs.
Example:
When you have a predictor defined with two inputs, call it with:
.. code-block:: python
predictor(e1, e2)
Returns:
list[array]: list of outputs
"""
output = self._do_call(dp)
if self.return_input:
return (dp, output)
else:
return output
@abstractmethod
def _do_call(self, dp):
"""
Args:
dp: input datapoint. must have the same length as input_names
Returns:
output as defined by the config
"""
class AsyncPredictorBase(PredictorBase):
""" Base class for all async predictors. """
@abstractmethod
def put_task(self, dp, callback=None):
"""
Args:
dp (list): A datapoint as inputs. It could be either batched or not
batched depending on the predictor implementation).
callback: a thread-safe callback to get called with
either outputs or (inputs, outputs), if `return_input` is True.
Returns:
concurrent.futures.Future: a Future of results
"""
@abstractmethod
def start(self):
""" Start workers """
def _do_call(self, dp):
fut = self.put_task(dp)
# in Tornado, Future.result() doesn't wait
return fut.result()
class OnlinePredictor(PredictorBase):
"""
A predictor which directly use an existing session and given tensors.
Attributes:
sess: The tf.Session object associated with this predictor.
"""
ACCEPT_OPTIONS = False
""" See Session.make_callable """
def __init__(self, input_tensors, output_tensors,
return_input=False, sess=None):
"""
Args:
input_tensors (list): list of names.
output_tensors (list): list of names.
return_input (bool): same as :attr:`PredictorBase.return_input`.
sess (tf.Session): the session this predictor runs in. If None,
will use the default session at the first call.
Note that in TensorFlow, default session is thread-local.
"""
def normalize_name(t):
if isinstance(t, six.string_types):
return get_op_tensor_name(t)[1]
return t
self.return_input = return_input
self.input_tensors = [normalize_name(x) for x in input_tensors]
self.output_tensors = [normalize_name(x) for x in output_tensors]
self.sess = sess
if sess is not None:
self._callable = sess.make_callable(
fetches=output_tensors,
feed_list=input_tensors,
accept_options=self.ACCEPT_OPTIONS)
else:
self._callable = None
def _do_call(self, dp):
assert len(dp) == len(self.input_tensors), \
"{} != {}".format(len(dp), len(self.input_tensors))
if self.sess is None:
self.sess = tf.get_default_session()
assert self.sess is not None, "Predictor isn't called under a default session!"
if self._callable is None:
self._callable = self.sess.make_callable(
fetches=self.output_tensors,
feed_list=self.input_tensors,
accept_options=self.ACCEPT_OPTIONS)
# run_metadata = tf.RunMetadata()
# options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
return self._callable(*dp)
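# --- Editor's note: hedged usage sketch, not part of the original file. ---
# Wrapping tensors of an already-built graph. The tensor names and the random
# input below are illustrative assumptions; `sess` is an existing tf.Session
# in which the graph lives.
#
#   pred = OnlinePredictor(['input:0'], ['linear/output:0'], sess=sess)
#   outputs = pred(np.random.rand(1, 100))   # one argument per input tensor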
class OfflinePredictor(OnlinePredictor):
""" A predictor built from a given config.
A single-tower model will be built without any prefix.
Example:
.. code-block:: python
config = PredictConfig(model=my_model,
inputs_names=['image'],
output_names=['linear/output', 'prediction'])
predictor = OfflinePredictor(config)
batch_image = np.random.rand(1, 100, 100, 3)
batch_output, batch_prediction = predictor(batch_image)
"""
def __init__(self, config):
"""
Args:
config (PredictConfig): the config to use.
"""
self.graph = config._maybe_create_graph()
with self.graph.as_default():
input = PlaceholderInput()
input.setup(config.input_signature)
with PredictTowerContext(''):
config.tower_func(*input.get_input_tensors())
input_tensors = get_tensors_by_names(config.input_names)
output_tensors = get_tensors_by_names(config.output_names)
config.session_init._setup_graph()
sess = config.session_creator.create_session()
config.session_init._run_init(sess)
super(OfflinePredictor, self).__init__(
input_tensors, output_tensors, config.return_input, sess)
| 5,588 | 30.937143 | 91 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/concurrency.py | # -*- coding: utf-8 -*-
# File: concurrency.py
import multiprocessing
import numpy as np
from concurrent.futures import Future
import tensorflow as tf
from six.moves import queue, range
from ..compat import tfv1
from ..tfutils.model_utils import describe_trainable_vars
from ..utils import logger
from ..utils.concurrency import DIE, ShareSessionThread, StoppableThread
from .base import AsyncPredictorBase, OfflinePredictor, OnlinePredictor
__all__ = ['MultiThreadAsyncPredictor']
class MultiProcessPredictWorker(multiprocessing.Process):
""" Base class for predict worker that runs offline in multiprocess"""
def __init__(self, idx, config):
"""
Args:
idx (int): index of the worker. the 0th worker will print log.
config (PredictConfig): the config to use.
"""
super(MultiProcessPredictWorker, self).__init__()
self.name = "MultiProcessPredictWorker-{}".format(idx)
self.idx = idx
self.config = config
def _init_runtime(self):
""" Call _init_runtime under different CUDA_VISIBLE_DEVICES, you'll
have workers that run on multiGPUs
"""
if self.idx != 0:
from tensorpack.models.registry import disable_layer_logging
disable_layer_logging()
self.predictor = OfflinePredictor(self.config)
if self.idx == 0:
with self.predictor.graph.as_default():
describe_trainable_vars()
class MultiProcessQueuePredictWorker(MultiProcessPredictWorker):
"""
An offline predictor worker that takes input and produces output by queue.
Each process will exit when they see :class:`DIE`.
"""
def __init__(self, idx, inqueue, outqueue, config):
"""
Args:
idx, config: same as in :class:`MultiProcessPredictWorker`.
inqueue (multiprocessing.Queue): input queue to get data point. elements are (task_id, dp)
outqueue (multiprocessing.Queue): output queue to put result. elements are (task_id, output)
"""
super(MultiProcessQueuePredictWorker, self).__init__(idx, config)
self.inqueue = inqueue
self.outqueue = outqueue
assert isinstance(self.inqueue, multiprocessing.queues.Queue)
assert isinstance(self.outqueue, multiprocessing.queues.Queue)
def run(self):
self._init_runtime()
while True:
tid, dp = self.inqueue.get()
if tid == DIE:
self.outqueue.put((DIE, None))
return
else:
self.outqueue.put((tid, self.predictor(*dp)))
class PredictorWorkerThread(StoppableThread, ShareSessionThread):
def __init__(self, queue, pred_func, id, batch_size=5):
super(PredictorWorkerThread, self).__init__()
self.name = "PredictorWorkerThread-{}".format(id)
self.queue = queue
self.func = pred_func
self.daemon = True
self.batch_size = batch_size
self.id = id
def run(self):
with self.default_sess():
while not self.stopped():
batched, futures = self.fetch_batch()
try:
outputs = self.func(*batched)
except tf.errors.CancelledError:
for f in futures:
f.cancel()
logger.warn("In PredictorWorkerThread id={}, call was cancelled.".format(self.id))
return
# print "Worker {} batched {} Queue {}".format(
# self.id, len(futures), self.queue.qsize())
# debug, for speed testing
# if not hasattr(self, 'xxx'):
# self.xxx = outputs = self.func(batched)
# else:
# outputs = [[self.xxx[0][0]] * len(batched[0]), [self.xxx[1][0]] * len(batched[0])]
for idx, f in enumerate(futures):
f.set_result([k[idx] for k in outputs])
def fetch_batch(self):
""" Fetch a batch of data without waiting"""
inp, f = self.queue.get()
nr_input_var = len(inp)
batched, futures = [[] for _ in range(nr_input_var)], []
for k in range(nr_input_var):
batched[k].append(inp[k])
futures.append(f)
while len(futures) < self.batch_size:
try:
inp, f = self.queue.get_nowait()
for k in range(nr_input_var):
batched[k].append(inp[k])
futures.append(f)
except queue.Empty:
break # do not wait
for k in range(nr_input_var):
batched[k] = np.asarray(batched[k])
return batched, futures
class MultiThreadAsyncPredictor(AsyncPredictorBase):
"""
    A multithreaded online async predictor which runs a list of OnlinePredictor.
    It does extra batching internally.
"""
def __init__(self, predictors, batch_size=5):
"""
Args:
predictors (list): a list of OnlinePredictor available to use.
batch_size (int): the maximum of an internal batch.
"""
assert len(predictors)
self._need_default_sess = False
for k in predictors:
assert isinstance(k, OnlinePredictor), type(k)
if k.sess is None:
self._need_default_sess = True
# TODO support predictors.return_input here
assert not k.return_input
self.input_queue = queue.Queue(maxsize=len(predictors) * 100)
self.threads = [
PredictorWorkerThread(
self.input_queue, f, id, batch_size=batch_size)
for id, f in enumerate(predictors)]
def start(self):
if self._need_default_sess:
assert tfv1.get_default_session() is not None, \
"Not session is bind to predictors, " \
"MultiThreadAsyncPredictor.start() has to be called under a default session!"
for t in self.threads:
t.start()
def put_task(self, dp, callback=None):
"""
Args:
dp (list): A datapoint as inputs. It could be either batched or not
batched depending on the predictor implementation).
callback: a thread-safe callback. When the results are ready, it will be called
with the "future" object.
Returns:
concurrent.futures.Future: a Future of results.
"""
f = Future()
if callback is not None:
f.add_done_callback(callback)
self.input_queue.put((dp, f))
return f
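# --- Editor's note: hedged usage sketch, not part of the original file. ---
# Batching requests from many client threads onto a pool of per-tower predictors.
# `predictors` would typically come from MultiTowerOfflinePredictor.get_predictors()
# (see multigpu.py); `handle` is a hypothetical thread-safe callback.
#
#   async_pred = MultiThreadAsyncPredictor(predictors, batch_size=16)
#   async_pred.start()
#   future = async_pred.put_task([img], callback=lambda f: handle(f.result()))
#   outputs = future.result()   # blocks until the batched run finishes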
| 6,665 | 36.033333 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/dataset.py | # -*- coding: utf-8 -*-
# File: dataset.py
import multiprocessing
import os
from abc import ABCMeta, abstractmethod
import six
from ..dataflow import DataFlow
from ..dataflow.remote import dump_dataflow_to_process_queue
from ..utils import logger
from ..utils.develop import HIDE_DOC
from ..utils.concurrency import DIE, OrderedResultGatherProc, ensure_proc_terminate
from ..utils.gpu import change_gpu, get_num_gpu
from ..utils.utils import get_tqdm
from .base import OfflinePredictor
from .concurrency import MultiProcessQueuePredictWorker
from .config import PredictConfig
__all__ = ['DatasetPredictorBase', 'SimpleDatasetPredictor',
'MultiProcessDatasetPredictor']
@six.add_metaclass(ABCMeta)
class DatasetPredictorBase(object):
""" Base class for dataset predictors.
These are predictors which run over a :class:`DataFlow`.
"""
def __init__(self, config, dataset):
"""
Args:
config (PredictConfig): the config of predictor.
dataset (DataFlow): the DataFlow to run on.
"""
assert isinstance(dataset, DataFlow)
assert isinstance(config, PredictConfig)
self.config = config
self.dataset = dataset
@abstractmethod
def get_result(self):
"""
Yields:
output for each datapoint in the DataFlow.
"""
pass
def get_all_result(self):
"""
Returns:
list: all outputs for all datapoints in the DataFlow.
"""
return list(self.get_result())
class SimpleDatasetPredictor(DatasetPredictorBase):
"""
Simply create one predictor and run it on the DataFlow.
"""
def __init__(self, config, dataset):
super(SimpleDatasetPredictor, self).__init__(config, dataset)
self.predictor = OfflinePredictor(config)
@HIDE_DOC
def get_result(self):
self.dataset.reset_state()
try:
sz = len(self.dataset)
except NotImplementedError:
sz = 0
with get_tqdm(total=sz, disable=(sz == 0)) as pbar:
for dp in self.dataset:
res = self.predictor(*dp)
yield res
pbar.update()
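# --- Editor's note: hedged usage sketch, not part of the original file. ---
# Running a PredictConfig over a DataFlow. `config` and `test_df` are
# hypothetical objects created elsewhere.
#
#   pred = SimpleDatasetPredictor(config, test_df)
#   for outputs in pred.get_result():   # one list of output values per datapoint
#       ...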
class MultiProcessDatasetPredictor(DatasetPredictorBase):
"""
Run prediction in multiple processes, on either CPU or GPU.
Each process fetch datapoints as tasks and run predictions independently.
"""
# TODO allow unordered
def __init__(self, config, dataset, nr_proc, use_gpu=True, ordered=True):
"""
Args:
config: same as in :class:`DatasetPredictorBase`.
dataset: same as in :class:`DatasetPredictorBase`.
nr_proc (int): number of processes to use
use_gpu (bool): use GPU or CPU.
If GPU, then ``nr_proc`` cannot be more than what's in
CUDA_VISIBLE_DEVICES.
ordered (bool): produce outputs in the original order of the
datapoints. This will be a bit slower. Otherwise, :meth:`get_result` will produce
outputs in any order.
"""
if config.return_input:
logger.warn("Using the option `return_input` in MultiProcessDatasetPredictor might be slow")
assert nr_proc >= 1, nr_proc
super(MultiProcessDatasetPredictor, self).__init__(config, dataset)
self.nr_proc = nr_proc
self.ordered = ordered
self.inqueue, self.inqueue_proc = dump_dataflow_to_process_queue(
self.dataset, nr_proc * 2, self.nr_proc) # put (idx, dp) to inqueue
if use_gpu:
try:
gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
except KeyError:
gpus = list(range(get_num_gpu()))
assert len(gpus) >= self.nr_proc, \
"nr_proc={} while only {} gpus available".format(
self.nr_proc, len(gpus))
else:
gpus = ['-1'] * self.nr_proc
# worker produces (idx, result) to outqueue
self.outqueue = multiprocessing.Queue()
self.workers = [MultiProcessQueuePredictWorker(
i, self.inqueue, self.outqueue, self.config)
for i in range(self.nr_proc)]
# start inqueue and workers
self.inqueue_proc.start()
for p, gpuid in zip(self.workers, gpus):
if gpuid == '-1':
logger.info("Worker {} uses CPU".format(p.idx))
else:
logger.info("Worker {} uses GPU {}".format(p.idx, gpuid))
with change_gpu(gpuid):
p.start()
if ordered:
self.result_queue = OrderedResultGatherProc(
self.outqueue, nr_producer=self.nr_proc)
self.result_queue.start()
ensure_proc_terminate(self.result_queue)
else:
self.result_queue = self.outqueue
ensure_proc_terminate(self.workers + [self.inqueue_proc])
@HIDE_DOC
def get_result(self):
try:
sz = len(self.dataset)
except NotImplementedError:
sz = 0
with get_tqdm(total=sz, disable=(sz == 0)) as pbar:
die_cnt = 0
while True:
res = self.result_queue.get()
pbar.update()
if res[0] != DIE:
yield res[1]
else:
die_cnt += 1
if die_cnt == self.nr_proc:
break
self.inqueue_proc.join()
self.inqueue_proc.terminate()
        if self.ordered:  # if ordered, then result_queue is a Process
self.result_queue.join()
self.result_queue.terminate()
for p in self.workers:
p.join()
p.terminate()
| 5,787 | 32.847953 | 104 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/feedfree.py | #!/usr/bin/env python
from tensorflow.python.training.monitored_session import _HookedSession as HookedSession
from ..callbacks import Callbacks
from ..tfutils.tower import PredictTowerContext
from .base import PredictorBase
__all__ = ['FeedfreePredictor']
class FeedfreePredictor(PredictorBase):
"""
Create a predictor that takes inputs from an :class:`InputSource`, instead of from feeds.
An instance `pred` of :class:`FeedfreePredictor` can be called only by `pred()`, which returns
a list of output values as defined in config.output_names.
"""
def __init__(self, config, input_source):
"""
Args:
config (PredictConfig): the config to use.
input_source (InputSource): the feedfree InputSource to use.
Must match the signature of the tower function in config.
"""
self._config = config
self._input_source = input_source
assert config.return_input is False, \
"return_input is not supported in FeedfreePredictor! " \
"If you need to fetch inputs, add the names to the output_names!"
self._hooks = []
self.graph = config._maybe_create_graph()
with self.graph.as_default():
self._input_callbacks = Callbacks(
self._input_source.setup(config.input_signature))
with PredictTowerContext(''):
self._input_tensors = self._input_source.get_input_tensors()
config.tower_func(*self._input_tensors)
self._tower_handle = config.tower_func.towers[-1]
self._output_tensors = self._tower_handle.get_tensors(config.output_names)
self._input_callbacks.setup_graph(None)
for h in self._input_callbacks.get_hooks():
self._register_hook(h)
self._initialize_session()
def _register_hook(self, hook):
"""
Args:
hook (tf.train.SessionRunHook):
"""
self._hooks.append(hook)
def _initialize_session(self):
# init the session
self._config.session_init._setup_graph()
self._sess = self._config.session_creator.create_session()
self._config.session_init._run_init(self._sess)
with self._sess.as_default():
self._input_callbacks.before_train()
self._hooked_sess = HookedSession(self._sess, self._hooks)
def __call__(self):
return self._hooked_sess.run(self._output_tensors)
def _do_call(self):
raise NotImplementedError("You're calling the wrong function!")
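# --- Editor's note: hedged usage sketch, not part of the original file. ---
# Feeding a predictor from an InputSource instead of placeholders. QueueInput is
# one such InputSource; `config` and `df` are hypothetical objects.
#
#   from tensorpack.input_source import QueueInput
#   pred = FeedfreePredictor(config, QueueInput(df))
#   outputs = pred()    # fetches config.output_names once per call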
| 2,598 | 35.097222 | 98 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/config.py | # -*- coding: utf-8 -*-
# File: config.py
import six
from ..compat import tfv1 as tf
from ..train.model_desc import ModelDescBase
from ..tfutils.sessinit import JustCurrentSession, SessionInit
from ..tfutils.sesscreate import NewSessionCreator
from ..tfutils.tower import TowerFunc
from ..utils import logger
__all__ = ['PredictConfig']
class PredictConfig(object):
def __init__(self,
model=None,
tower_func=None,
input_signature=None,
input_names=None,
output_names=None,
session_creator=None,
session_init=None,
return_input=False,
create_graph=True,
):
"""
Users need to provide enough arguments to create a tower function,
which will be used to construct the graph.
This can be provided in the following ways:
1. `model`: a :class:`ModelDesc` instance. It will contain a tower function by itself.
2. `tower_func`: a :class:`tfutils.TowerFunc` instance.
Provide a tower function instance directly.
3. `tower_func`: a symbolic function and `input_signature`: the signature of the function.
Provide both a function and its signature.
Example:
.. code-block:: python
config = PredictConfig(model=my_model,
inputs_names=['image'],
output_names=['linear/output', 'prediction'])
Args:
model (ModelDescBase): to be used to construct a tower function.
tower_func: a callable which takes input tensors (by positional args) and construct a tower.
or a :class:`tfutils.TowerFunc` instance.
input_signature ([tf.TensorSpec]): if tower_func is a plain function (instead of a TowerFunc),
this describes the list of inputs it takes.
input_names (list): a list of input tensor names. Defaults to match input_signature.
The name can be either the name of a tensor, or the name of one input of the tower.
            output_names (list): a list of names of the output tensors to predict. The
                tensors can be any tensor in the graph that's computable from the tensors corresponding to `input_names`.
session_creator (tf.train.SessionCreator): how to create the
session. Defaults to :class:`NewSessionCreator()`.
session_init (SessionInit): how to initialize variables of the session.
Defaults to do nothing.
return_input (bool): same as in :attr:`PredictorBase.return_input`.
create_graph (bool): create a new graph, or use the default graph
when predictor is first initialized.
"""
def assert_type(v, tp, name):
assert isinstance(v, tp), \
"Argument '{}' has to be type '{}', but an object of type '{}' found.".format(
name, tp.__name__, v.__class__.__name__)
if model is not None:
assert_type(model, ModelDescBase, 'model')
assert input_signature is None and tower_func is None
self.input_signature = model.get_input_signature()
self.tower_func = TowerFunc(model.build_graph, self.input_signature)
else:
if isinstance(tower_func, TowerFunc):
input_signature = tower_func.input_signature
assert input_signature is not None and tower_func is not None
self.input_signature = input_signature
self.tower_func = TowerFunc(tower_func, input_signature)
if session_init is None:
session_init = JustCurrentSession()
self.session_init = session_init
assert_type(self.session_init, SessionInit, 'session_init')
if session_creator is None:
self.session_creator = NewSessionCreator()
else:
self.session_creator = session_creator
# inputs & outputs
self.input_names = input_names
if self.input_names is None:
self.input_names = [k.name for k in self.input_signature]
assert output_names is not None, "Argument 'output_names' is not provided!"
self.output_names = output_names
assert_type(self.output_names, list, 'output_names')
assert_type(self.input_names, list, 'input_names')
if len(self.input_names) == 0:
logger.warn('PredictConfig receives empty "input_names".')
for v in self.input_names:
assert_type(v, six.string_types, 'Each item in input_names')
assert len(self.output_names), "Argument 'output_names' cannot be empty!"
self.return_input = bool(return_input)
self.create_graph = bool(create_graph)
def _maybe_create_graph(self):
if self.create_graph:
return tf.Graph()
return tf.get_default_graph()
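# A minimal, hedged sketch of the third construction route described in the
# docstring above: a plain tower function plus its input signature.  The names
# `my_tower_fn`, 'image', 'prob' and the 224x224x3 shape are illustrative
# assumptions, not part of the tensorpack API.
def _example_predict_config(my_tower_fn):
    sig = [tf.TensorSpec([None, 224, 224, 3], tf.float32, 'image')]
    return PredictConfig(
        tower_func=my_tower_fn,
        input_signature=sig,
        input_names=['image'],
        output_names=['prob'])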
| 5,002 | 41.042017 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .base import *
from .concurrency import *
from .config import *
from .dataset import *
from .multigpu import *
from pkgutil import iter_modules
import os
import os.path
__all__ = []
def global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else dir(p)
if lst:
del globals()[name]
for k in lst:
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if module_name.startswith('_'):
continue
global_import(module_name)
| 1,015 | 23.780488 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/predict/multigpu.py | # -*- coding: utf-8 -*-
# File: multigpu.py
import tensorflow as tf
from ..input_source import PlaceholderInput
from ..tfutils.tower import PredictTowerContext
from ..utils import logger
from .base import OnlinePredictor
__all__ = ['MultiTowerOfflinePredictor',
'DataParallelOfflinePredictor']
class MultiTowerOfflinePredictor(OnlinePredictor):
""" A multi-tower multi-GPU predictor.
It builds one predictor for each tower.
"""
def __init__(self, config, towers):
"""
Args:
config (PredictConfig): the config to use.
towers: a list of relative GPU id.
"""
assert len(towers) > 0
self.graph = config._maybe_create_graph()
self.predictors = []
self.return_input = config.return_input
with self.graph.as_default():
handles = []
input = PlaceholderInput()
input.setup(config.input_signature)
for idx, t in enumerate(towers):
tower_name = 'tower' + str(t)
device = '/gpu:{}'.format(t)
with tf.variable_scope(tf.get_variable_scope(), reuse=idx > 0), \
tf.device(device), \
PredictTowerContext(tower_name):
logger.info("Building graph for predict tower '{}' on device {} ...".format(tower_name, device))
config.tower_func(*input.get_input_tensors())
handles.append(config.tower_func.towers[-1])
config.session_init._setup_graph()
self.sess = config.session_creator.create_session()
config.session_init._run_init(self.sess)
for h in handles:
input_tensors = h.get_tensors(config.input_names)
output_tensors = h.get_tensors(config.output_names)
self.predictors.append(OnlinePredictor(
input_tensors, output_tensors, config.return_input, self.sess))
def _do_call(self, dp):
# use the first tower for compatible PredictorBase interface
return self.predictors[0]._do_call(dp)
def get_predictor(self, n):
"""
Returns:
            list[OnlinePredictor]: the first n predictors, assigned to towers by round-robin.
"""
l = len(self.predictors)
if n >= l:
logger.warn("n > #towers, will assign predictor to GPU by round-robin")
return [self.predictors[k % l] for k in range(n)]
def get_predictors(self):
"""
Returns:
list[OnlinePredictor]: a list of predictor
"""
return self.predictors
class DataParallelOfflinePredictor(OnlinePredictor):
"""
A data-parallel predictor. It builds one predictor that utilizes all GPUs.
Note that it doesn't split/concat inputs/outputs automatically.
Instead, its inputs are:
``[input[0] in tower[0], input[1] in tower[0], ..., input[0] in tower[1], input[1] in tower[1], ...]``
Similar for the outputs.
"""
def __init__(self, config, towers):
"""
Args:
config (PredictConfig): the config to use.
towers: a list of relative GPU id.
"""
self.graph = config._maybe_create_graph()
with self.graph.as_default():
input_tensors = []
output_tensors = []
for idx, t in enumerate(towers):
tower_name = 'tower' + str(t)
new_sig = [tf.TensorSpec(dtype=p.dtype, shape=p.shape, name=tower_name + '_' + p.name)
for p in config.input_signature]
input = PlaceholderInput()
input.setup(new_sig)
with tf.variable_scope(tf.get_variable_scope(), reuse=idx > 0), \
tf.device('/gpu:{}'.format(t)), \
PredictTowerContext(tower_name):
config.tower_func(*input.get_input_tensors())
h = config.tower_func.towers[-1]
input_tensors.extend(h.get_tensors(config.input_names))
output_tensors.extend(h.get_tensors(config.output_names))
config.session_init._setup_graph()
sess = config.session_creator.create_session()
config.session_init._run_init(sess)
super(DataParallelOfflinePredictor, self).__init__(
input_tensors, output_tensors, config.return_input, sess)
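# A minimal, hedged sketch of using the multi-tower predictor above.  `config`
# is assumed to be a ready PredictConfig; each returned OnlinePredictor shares
# one session but runs on its own GPU, so they can be handed to worker threads.
def _example_multi_tower_predictors(config, num_gpus):
    multi_pred = MultiTowerOfflinePredictor(config, towers=list(range(num_gpus)))
    return multi_pred.get_predictors()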
| 4,443 | 35.42623 | 116 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/training.py | # -*- coding: utf-8 -*-
# File: training.py
import copy
import pprint
import re
from abc import ABCMeta, abstractmethod
from contextlib import contextmanager
import six
import tensorflow as tf
from ..compat import tfv1
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.gradproc import ScaleGradient
from ..tfutils.tower import TrainTowerContext
from ..utils import logger
from ..utils.develop import HIDE_DOC
from .utils import (
GradientPacker, LeastLoadedDeviceSetter, aggregate_grads, allreduce_grads, allreduce_grads_hierarchical,
merge_grad_list, override_to_local_variable, split_grad_list)
__all__ = ["DataParallelBuilder"]
@six.add_metaclass(ABCMeta)
class GraphBuilder(object):
@abstractmethod
def build(*args, **kwargs):
pass
@contextmanager
def _maybe_reuse_vs(reuse):
if reuse:
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
yield
else:
yield
class DataParallelBuilder(GraphBuilder):
def __init__(self, towers):
"""
Args:
towers(list[int]): list of GPU ids.
"""
if len(towers) > 1:
logger.info("[DataParallel] Training a model of {} towers.".format(len(towers)))
if not tf.test.is_built_with_cuda():
logger.error("[DataParallel] TensorFlow was not built with CUDA support!")
self.towers = towers
@staticmethod
def _check_grad_list(grad_list):
"""
Args:
grad_list: list of list of tuples, shape is Ngpu x Nvar x 2
"""
nvars = [len(k) for k in grad_list]
def basename(x):
return re.sub('tower[0-9]+/', '', x.op.name)
if len(set(nvars)) != 1:
names_per_gpu = [{basename(k[1]) for k in grad_and_vars} for grad_and_vars in grad_list]
inters = copy.copy(names_per_gpu[0])
for s in names_per_gpu:
inters &= s
for s in names_per_gpu:
s -= inters
logger.error("Unique trainable variables on towers: " + pprint.pformat(names_per_gpu))
raise ValueError("Number of gradients from each tower is different! " + str(nvars))
@staticmethod
def call_for_each_tower(
towers, func, devices=None, use_vs=None):
"""
Run `func` on all GPUs (towers) and return the results.
Args:
towers (list[int]): a list of GPU id.
func: a lambda to be called inside each tower
devices: a list of devices to be used. By default will use '/gpu:{tower}'
use_vs (list[bool]): list of use_vs to passed to TowerContext
Returns:
List of outputs of ``func``, evaluated on each tower.
"""
ret = []
if devices is not None:
assert len(devices) == len(towers)
if use_vs is not None:
assert len(use_vs) == len(towers)
tower_names = ['tower{}'.format(idx) for idx in range(len(towers))]
for idx, t in enumerate(towers):
device = devices[idx] if devices is not None else '/gpu:{}'.format(t)
usevs = use_vs[idx] if use_vs is not None else False
reuse = not usevs and idx > 0
with tfv1.device(device), _maybe_reuse_vs(reuse), TrainTowerContext(
tower_names[idx],
vs_name=tower_names[idx] if usevs else '',
index=idx, total=len(towers)):
if len(str(device)) < 10: # a device function doesn't have good string description
logger.info("Building graph for training tower {} on device {} ...".format(idx, device))
else:
logger.info("Building graph for training tower {} ...".format(idx))
# When use_vs is True, use LOCAL_VARIABLES,
# so these duplicated variables won't be saved by default.
with override_to_local_variable(enable=usevs):
ret.append(func())
return ret
@staticmethod
@HIDE_DOC
def build_on_towers(*args, **kwargs):
return DataParallelBuilder.call_for_each_tower(*args, **kwargs)
class SyncMultiGPUParameterServerBuilder(DataParallelBuilder):
"""
Data-parallel training in 'ParameterServer' mode.
It builds one tower on each GPU with
shared variable scope. It synchronizes the gradients computed
from each tower, averages them and applies to the shared variables.
It is an equivalent of ``--variable_update=parameter_server`` in
`tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.
"""
def __init__(self, towers, ps_device):
"""
Args:
towers(list[int]): list of GPU id
ps_device (str): either 'gpu' or 'cpu', where variables are stored.
"""
super(SyncMultiGPUParameterServerBuilder, self).__init__(towers)
assert ps_device in ['cpu', 'gpu']
self.ps_device = ps_device
def call_for_each_tower(self, tower_fn):
"""
Call the function `tower_fn` under :class:`TowerContext` for each tower.
Returns:
a list, contains the return values of `tower_fn` on each tower.
"""
raw_devices = ['/gpu:{}'.format(k) for k in self.towers]
if self.ps_device == 'gpu':
devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]
else:
devices = [tf.train.replica_device_setter(
worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices]
return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)
def build(self, grad_list, get_opt_fn):
"""
Reduce the gradients, apply them with the optimizer,
and set self.grads to a list of (g, v), containing the averaged gradients.
Args:
grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
tf.Operation: the training op
"""
assert len(grad_list) == len(self.towers)
DataParallelBuilder._check_grad_list(grad_list)
# debug tower performance (without update):
# ops = [k[0] for k in grad_list[1]] + [k[0] for k in grad_list[0]]
# self.train_op = tf.group(*ops)
# return
self.grads = aggregate_grads(grad_list, colocation=True)
# grads = grad_list[0]
opt = get_opt_fn()
if self.ps_device == 'cpu':
with tf.device('/cpu:0'):
train_op = opt.apply_gradients(self.grads, name='train_op')
else:
train_op = opt.apply_gradients(self.grads, name='train_op')
return train_op
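# A minimal, hedged sketch of the two-step builder protocol used by the
# trainers: `tower_fn` (assumed to return a list of (grad, var) pairs) is run
# once per GPU, then the collected gradients are reduced and applied.
def _example_ps_builder(tower_fn, get_opt_fn, gpus=(0, 1)):
    builder = SyncMultiGPUParameterServerBuilder(list(gpus), ps_device='cpu')
    grad_list = builder.call_for_each_tower(tower_fn)  # #GPU x #Var x 2
    return builder.build(grad_list, get_opt_fn)        # the training op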
class SyncMultiGPUReplicatedBuilder(DataParallelBuilder):
"""
Data-parallel training in "replicated" mode,
where each GPU contains a replicate of the whole model.
It will build one tower on each GPU under its own variable scope.
    Each gradient update is averaged or summed across all GPUs through NCCL.
It is an equivalent of ``--variable_update=replicated`` in
`tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.
"""
def __init__(self, towers, average, mode):
super(SyncMultiGPUReplicatedBuilder, self).__init__(towers)
self._average = average
assert mode in ['nccl', 'cpu', 'hierarchical'], mode
self._mode = mode
if self._mode == 'hierarchical' and len(towers) != 8:
            logger.warn("mode='hierarchical' requires exactly 8 GPUs. Falling back to mode='nccl'.")
self._mode = 'nccl'
def call_for_each_tower(self, tower_fn):
"""
Call the function `tower_fn` under :class:`TowerContext` for each tower.
Returns:
a list, contains the return values of `tower_fn` on each tower.
"""
# if tower_fn returns [(grad, var), ...], this returns #GPU x #VAR x 2
return DataParallelBuilder.build_on_towers(
self.towers,
tower_fn,
# use no variable scope for the first tower
use_vs=[False] + [True] * (len(self.towers) - 1))
def build(self, grad_list, get_opt_fn):
"""
Reduce the gradients, apply them with the optimizer,
and set self.grads to #GPU number of lists of (g, v), containing the all-reduced gradients on each device.
Args:
grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
(tf.Operation, tf.Operation)
1. the training op.
2. the op which sync variables from GPU 0 to other GPUs.
It has to be run before the training has started.
And you can optionally run it later to sync non-trainable variables.
"""
assert len(grad_list) == len(self.towers)
raw_devices = ['/gpu:{}'.format(k) for k in self.towers]
DataParallelBuilder._check_grad_list(grad_list)
dtypes = {x[0].dtype.base_dtype for x in grad_list[0]}
dtypes_nccl_supported = [tf.float32, tf.float64]
if get_tf_version_tuple() >= (1, 8):
dtypes_nccl_supported.append(tf.float16)
valid_for_nccl = all(k in dtypes_nccl_supported for k in dtypes)
if self._mode == 'nccl' and not valid_for_nccl:
            logger.warn("Cannot use mode='nccl' because some gradients have unsupported types. Falling back to mode='cpu'")
self._mode = 'cpu'
if self._mode in ['nccl', 'hierarchical']:
all_grads, all_vars = split_grad_list(grad_list)
# use allreduce from tf-benchmarks
# from .batch_allreduce import AllReduceSpecAlgorithm
# algo = AllReduceSpecAlgorithm('nccl', list(range(8)), 0, 10)
# all_grads, warmup_ops = algo.batch_all_reduce(all_grads, 1, True, False)
# print("WARMUP OPS", warmup_ops)
if self._mode == 'nccl':
all_grads = allreduce_grads(all_grads, average=self._average) # #gpu x #param
else:
packer = GradientPacker(len(raw_devices))
succ = packer.compute_strategy(all_grads[0])
if succ:
packed_grads = packer.pack_all(all_grads, raw_devices)
packed_grads_aggr = allreduce_grads_hierarchical(
packed_grads, raw_devices, average=self._average)
all_grads = packer.unpack_all(packed_grads_aggr, raw_devices)
else:
all_grads = allreduce_grads_hierarchical(all_grads, raw_devices, average=self._average)
self.grads = merge_grad_list(all_grads, all_vars)
elif self._mode == 'cpu':
agg_grad_and_vars = aggregate_grads(
grad_list, colocation=False,
devices=['/cpu:0'], average=self._average) # #param x 2
self.grads = [] # #gpu x #param x 2
for grad_and_vars in grad_list: # grad_and_vars: #paramx2
# take v from each tower, and g from average.
self.grads.append(
[(g, v) for (_, v), (g, _) in zip(grad_and_vars, agg_grad_and_vars)])
train_ops = []
opt = get_opt_fn()
with tf.name_scope('apply_gradients'):
for idx, grad_and_vars in enumerate(self.grads):
with tf.device(raw_devices[idx]):
# apply_gradients may create variables. Make them LOCAL_VARIABLES
with override_to_local_variable(enable=idx > 0):
train_ops.append(opt.apply_gradients(
grad_and_vars, name='apply_grad_{}'.format(idx)))
train_op = tf.group(*train_ops, name='train_op')
if len(self.towers) > 1:
with tf.name_scope('sync_variables'):
post_init_op = SyncMultiGPUReplicatedBuilder.get_post_init_ops()
else:
post_init_op = None
return train_op, post_init_op
# Adopt from https://github.com/tensorflow/benchmarks/blob/master/scripts/tf_cnn_benchmarks/variable_mgr.py
@staticmethod
def get_post_init_ops():
"""
Copy values of variables on GPU 0 to other GPUs.
"""
# literally all variables, because it's better to sync optimizer-internal variables as well
all_vars = tf.global_variables() + tf.local_variables()
var_by_name = {v.name: v for v in all_vars}
trainable_names = {x.name for x in tf.trainable_variables()}
post_init_ops = []
def log_failure(name, reason):
logger.warn("[ReplicatedTrainer] Do not know how to sync variable '{}' across GPUs. "
"Reason: {} ".format(name, reason))
assert name not in trainable_names, \
"The aforementioned variable is trainable, so this is probably a fatal error."
logger.warn(
"[ReplicatedTrainer] This variable is non-trainable. "
"Ignore this warning if you know it's OK to leave it out-of-sync.")
for v in all_vars:
if not v.name.startswith('tower'):
continue
if v.name.startswith('tower0'):
# in this trainer, the master name doesn't have the towerx/ prefix
log_failure(v.name, "Name should not have prefix 'tower0' in this trainer!")
continue # TODO some vars (EMA) may still startswith tower0
split_name = v.name.split('/')
prefix = split_name[0]
realname = '/'.join(split_name[1:])
if prefix in realname:
log_failure(v.name, "Prefix {} appears multiple times in its name!".format(prefix))
continue
copy_from = var_by_name.get(realname)
if copy_from is not None:
post_init_ops.append(v.assign(copy_from.read_value()))
else:
log_failure(v.name, "Cannot find {} in the graph!".format(realname))
logger.info(
"'sync_variables_from_main_tower' includes {} operations.".format(len(post_init_ops)))
return tf.group(*post_init_ops, name='sync_variables_from_main_tower')
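# A minimal, hedged sketch for the replicated builder above.  Unlike the
# parameter-server variant it returns two ops: `post_init_op` (None for a
# single tower) must run once after variable initialization so every GPU
# starts from identical weights.  `tower_fn`/`get_opt_fn` are assumptions.
def _example_replicated_builder(tower_fn, get_opt_fn, gpus=(0, 1)):
    builder = SyncMultiGPUReplicatedBuilder(list(gpus), average=True, mode='nccl')
    grad_list = builder.call_for_each_tower(tower_fn)
    train_op, post_init_op = builder.build(grad_list, get_opt_fn)
    return train_op, post_init_op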
class AsyncMultiGPUBuilder(DataParallelBuilder):
"""
Data-parallel training with async update.
It builds one tower on each GPU with shared variable scope.
Every tower computes the gradients and independently applies them to the
variables, without synchronizing and averaging across towers.
"""
def __init__(self, towers, scale_gradient=True):
"""
Args:
towers(list[int]): list of GPU ids.
scale_gradient (bool): if True, will scale each gradient by ``1.0/nr_gpu``.
"""
super(AsyncMultiGPUBuilder, self).__init__(towers)
self._scale_gradient = scale_gradient
def call_for_each_tower(self, tower_fn):
"""
Call the function `tower_fn` under :class:`TowerContext` for each tower.
Returns:
a list, contains the return values of `tower_fn` on each tower.
"""
ps_device = 'cpu' if len(self.towers) >= 4 else 'gpu'
raw_devices = ['/gpu:{}'.format(k) for k in self.towers]
if ps_device == 'gpu':
devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices]
else:
devices = [tf.train.replica_device_setter(
worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices]
return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)
def build(self, grad_list, get_opt_fn):
"""
Args:
grad_list ([[(grad, var), ...], ...]): #GPU lists to be reduced. Each is the gradients computed on each GPU.
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
tf.Operation: the training op
"""
assert len(grad_list) == len(self.towers)
DataParallelBuilder._check_grad_list(grad_list)
if self._scale_gradient and len(self.towers) > 1:
# pretend to average the grads, in order to make async and
# sync have consistent effective learning rate
gradproc = ScaleGradient(('.*', 1.0 / len(self.towers)), verbose=False)
grad_list = [gradproc.process(gv) for gv in grad_list]
# Ngpu x Nvar x 2
train_ops = []
opt = get_opt_fn()
with tf.name_scope('async_apply_gradients'):
for i, grad_and_vars in enumerate(zip(*grad_list)):
# Ngpu x 2
v = grad_and_vars[0][1]
with tf.device(v.device):
# will call apply_gradients (therefore gradproc) multiple times
train_ops.append(opt.apply_gradients(
grad_and_vars, name='apply_grad_{}'.format(i)))
return tf.group(*train_ops, name='train_op')
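# A minimal, hedged sketch for the async builder: the same two-step protocol,
# but each tower applies its own gradients without synchronization.  With
# scale_gradient=True the 1/#GPU scaling keeps the effective learning rate
# comparable to the synchronous builders.  Names are assumptions as above.
def _example_async_builder(tower_fn, get_opt_fn, gpus=(0, 1)):
    builder = AsyncMultiGPUBuilder(list(gpus), scale_gradient=True)
    grad_list = builder.call_for_each_tower(tower_fn)
    return builder.build(grad_list, get_opt_fn)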
| 17,248 | 40.167064 | 120 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/utils.py | # -*- coding: utf-8 -*-
# File: utils.py
import operator
from contextlib import contextmanager
import tensorflow as tf
from ..compat import tfv1
from ..tfutils.common import get_tf_version_tuple
from ..tfutils.scope_utils import cached_name_scope, under_name_scope
from ..tfutils.varreplace import custom_getter_scope
from ..utils import logger
from ..utils.argtools import call_only_once
__all__ = ["LeastLoadedDeviceSetter", "allreduce_grads", "aggregate_grads"]
"""
Some utilities for building the graph.
"""
def _replace_global_by_local(kwargs):
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = {tf.GraphKeys.GLOBAL_VARIABLES}
else:
collections = set(collections.copy())
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.add(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
@contextmanager
def override_to_local_variable(enable=True):
"""
Returns:
a context where all variables will be created as local.
"""
if enable:
def custom_getter(getter, name, *args, **kwargs):
_replace_global_by_local(kwargs)
return getter(name, *args, **kwargs)
with custom_getter_scope(custom_getter):
yield
else:
yield
# https://github.com/tensorflow/benchmarks/blob/48cbef14a592e02a14beee8e9aef3ad22cadaed1/scripts/tf_cnn_benchmarks/variable_mgr_util.py#L192-L218
class LeastLoadedDeviceSetter(object):
"""
Helper class to assign variables on the least loaded ps-device.
Usage:
.. code-block:: python
with tf.device(LeastLoadedDeviceSetter(...)):
...
"""
def __init__(self, worker_device, ps_devices):
"""
Args:
worker_device: the device to use for compute ops.
ps_devices: a list of device to use for Variable ops.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
# from tensorflow.python.training.device_util import canonicalize
# from tensorflow.python.distribute.device_util import canonicalize
def canonicalize(name): # tensorflow/tensorflow#11484
return tfv1.DeviceSpec.from_string(name).to_string()
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return canonicalize(self.worker_device)
device_index, _ = min(enumerate(
self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
if var_size is None:
logger.warn("[LeastLoadedDeviceSetter] Shape of variable {} is not fully defined!".format(op.name))
var_size = 0
self.ps_sizes[device_index] += var_size
return canonicalize(device_name)
def __str__(self):
return "LeastLoadedDeviceSetter-{}".format(self.worker_device)
def split_grad_list(grad_list):
"""
Args:
grad_list: K x N x 2
Returns:
K x N: gradients
K x N: variables
"""
g = []
v = []
for tower in grad_list:
g.append([x[0] for x in tower])
v.append([x[1] for x in tower])
return g, v
def merge_grad_list(all_grads, all_vars):
"""
Args:
all_grads (K x N): gradients
all_vars(K x N): variables
Return:
K x N x 2: list of list of (grad, var) pairs
"""
return [list(zip(gs, vs)) for gs, vs in zip(all_grads, all_vars)]
@under_name_scope('AllReduceGrads')
def allreduce_grads(all_grads, average):
"""
All-reduce average the gradients among K devices. Results are broadcasted to all devices.
Args:
all_grads (K x N): List of list of gradients. N is the number of variables.
average (bool): average gradients or not.
Returns:
K x N: same as input, but each grad is replaced by the average over K devices.
"""
if get_tf_version_tuple() <= (1, 12):
from tensorflow.contrib import nccl # deprecated
else:
from tensorflow.python.ops import nccl_ops as nccl
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads
new_all_grads = [] # N x K
for grads in zip(*all_grads):
summed = nccl.all_sum(grads)
grads_for_devices = [] # K
for g in summed:
with tf.device(g.device):
# tensorflow/benchmarks didn't average gradients
if average:
g = tf.multiply(g, 1.0 / nr_tower)
grads_for_devices.append(g)
new_all_grads.append(grads_for_devices)
# transpose to K x N
ret = list(zip(*new_all_grads))
return ret
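# Hedged illustration of the K x N layout used above (K towers, N variables),
# with made-up gradient names:
#   all_grads = [[g0_a, g0_b],          # tower 0
#                [g1_a, g1_b]]          # tower 1
#   reduced = allreduce_grads(all_grads, average=True)
#   # reduced[k][n] lives on GPU k and equals (g0_n + g1_n) / 2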
@under_name_scope('AllReduceGradsHierachical')
def allreduce_grads_hierarchical(all_grads, devices, average=False):
"""
Hierarchical allreduce for DGX-1 system.
Args:
all_grads (K x N): List of list of gradients. N is the number of variables.
devices ([str]): K str for the K devices.
average (bool): average gradients or not.
Returns:
(K x N): same as input, but each grad is replaced by the average over K lists.
"""
num_gpu = len(devices)
assert num_gpu == 8, num_gpu
assert len(all_grads) == num_gpu, len(all_grads)
group_size = num_gpu // 2
agg_all_grads = [] # N x K
for varid, grads in enumerate(zip(*all_grads)):
# grads: K gradients
g0_main_gpu = varid % num_gpu
g1_main_gpu = (g0_main_gpu + group_size) % num_gpu
g0_start = 0 if g0_main_gpu < group_size else group_size
g1_start = 0 if g1_main_gpu < group_size else group_size
assert g0_start != g1_start
g0_grads = grads[g0_start: g0_start + group_size]
g1_grads = grads[g1_start: g1_start + group_size]
with tf.device(devices[g0_main_gpu]):
g0_agg = tf.add_n(g0_grads, name='group0_agg')
with tf.device(devices[g1_main_gpu]):
g1_agg = tf.add_n(g1_grads, name='group1_agg')
g1_total_agg = tf.add(g0_agg, g1_agg, name='group1_total_agg')
with tf.device(devices[g0_main_gpu]):
g0_total_agg = tf.identity(g1_total_agg, name='group0_total_agg')
agg_grads = [] # K aggregated grads
for k in range(num_gpu):
if (k < group_size) == (g0_main_gpu < group_size):
main_gpu = g0_total_agg
else:
main_gpu = g1_total_agg
with tf.device(devices[k]):
if not average:
device_total_agg = tf.identity(
main_gpu, name='device{}_total_agg'.format(k))
else:
# TODO where to put average?
device_total_agg = tf.multiply(
main_gpu, 1.0 / num_gpu, name='device{}_total_agg'.format(k))
agg_grads.append(device_total_agg)
agg_all_grads.append(agg_grads)
# transpose
agg_all_grads = list(zip(*agg_all_grads)) # K x Nvar
return agg_all_grads
@under_name_scope('AggregateGrads')
def aggregate_grads(all_grads,
colocation=False,
devices=None,
average=True):
"""
Average the gradients.
Args:
all_grads (K x N x 2): A list of K lists. Each of the list is a list of N (grad, var) tuples.
The variables have to be the same across the K lists.
colocation (bool): colocate gradient averaging on the device of the variable.
devices (list[str]): assign the averaging to these device in
round-robin. Cannot be used together with ``colocation``.
average (bool): do average or sum
Returns:
(N x 2): A list of N (grad, var) tuples, where grad is averaged or summed over K.
"""
assert not (devices is not None and colocation)
if devices is not None:
assert isinstance(devices, list), devices
nr_tower = len(all_grads)
if nr_tower == 1:
return all_grads[0]
def aggregate(grads):
if average:
return tf.multiply(tf.add_n(grads), 1.0 / nr_tower)
else:
return tf.add_n(grads)
ret = []
for idx, grad_and_vars in enumerate(zip(*all_grads)):
# Ngpu * 2
v = grad_and_vars[0][1]
grads = [g for (g, _) in grad_and_vars]
if colocation:
with tf.device(v.device): # colocate summed grad with var
grad = aggregate(grads)
elif devices is None:
grad = aggregate(grads)
else:
dev = devices[idx % len(devices)]
with tf.device(dev):
grad = aggregate(grads)
ret.append((grad, v))
return ret
average_grads = aggregate_grads
# https://github.com/tensorflow/benchmarks/blob/48cbef14a592e02a14beee8e9aef3ad22cadaed1/scripts/tf_cnn_benchmarks/variable_mgr_util.py#L140-L166
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size is None or not kwargs.get('trainable', True):
# TODO a lot of vars won't be saved then
_replace_global_by_local(kwargs)
return getter(*args, **kwargs)
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
class GradientPacker(object):
"""
Concat gradients together to optimize transfer.
"""
def __init__(self, num_split=8):
self._num_split = num_split
@call_only_once
def compute_strategy(self, grads):
"""
Returns:
bool - False if grads cannot be packed due to various reasons.
"""
for g in grads:
assert g.shape.is_fully_defined(), "Shape of {} is {}!".format(g.name, g.shape)
self._shapes = [g.shape for g in grads]
self._sizes = [g.shape.num_elements() for g in grads]
self._total_size = sum(self._sizes)
if self._total_size / self._num_split < 1024:
logger.info("Skip GradientPacker due to too few gradients.")
return False
# should have the same dtype
dtypes = {g.dtype for g in grads}
if len(dtypes) != 1:
logger.info("Skip GradientPacker due to inconsistent gradient types.")
return False
self._grad_dtype = grads[0].dtype
split_size = self._total_size // self._num_split
split_size_last = self._total_size - split_size * (self._num_split - 1)
self._split_sizes = [split_size] * (self._num_split - 1) + [split_size_last]
logger.info(
"Will pack {} gradients of total dimension={} into {} splits.".format(
len(self._sizes), self._total_size, self._num_split))
return True
def pack(self, grads):
"""
Args:
grads (list): list of gradient tensors
Returns:
packed list of gradient tensors to be aggregated.
"""
for i, g in enumerate(grads):
assert g.shape == self._shapes[i]
with cached_name_scope("GradientPacker", top_level=False):
concat_grads = tf.concat([tf.reshape(g, [-1]) for g in grads], 0, name='concatenated_grads')
# concat_grads = tf.cast(concat_grads, tf.float16)
grad_packs = tf.split(concat_grads, self._split_sizes)
return grad_packs
def unpack(self, grad_packs):
with cached_name_scope("GradientPacker", top_level=False):
concat_grads = tf.concat(grad_packs, 0, name='concatenated_packs')
# concat_grads = tf.cast(concat_grads, self._grad_dtype)
flattened_grads = tf.split(concat_grads, self._sizes)
grads = [tf.reshape(g, shape) for g, shape in zip(flattened_grads, self._shapes)]
return grads
def pack_all(self, all_grads, devices):
"""
Args:
all_grads: K x N, K lists of gradients to be packed
"""
ret = [] # #GPU x #split
for dev, grads in zip(devices, all_grads):
with tf.device(dev):
ret.append(self.pack(grads))
return ret
def unpack_all(self, all_packed, devices):
"""
Args:
all_packed: K lists of packed gradients.
"""
all_grads = [] # #GPU x #Var
for dev, packed_grads_single_device in zip(devices, all_packed):
with tf.device(dev):
all_grads.append(self.unpack(packed_grads_single_device))
return all_grads
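# A minimal, hedged sketch of how GradientPacker is meant to wrap the
# hierarchical allreduce (mirroring the logic in training.py): decide the
# packing strategy from one tower's gradients, pack every tower, allreduce the
# few packed tensors, then unpack.  Note allreduce_grads_hierarchical asserts
# exactly 8 devices.  `all_grads`/`raw_devices` follow the K x N conventions.
def _example_packed_allreduce(all_grads, raw_devices, average=True):
    packer = GradientPacker(len(raw_devices))
    if not packer.compute_strategy(all_grads[0]):
        return allreduce_grads_hierarchical(all_grads, raw_devices, average=average)
    packed = packer.pack_all(all_grads, raw_devices)
    packed_aggr = allreduce_grads_hierarchical(packed, raw_devices, average=average)
    return packer.unpack_all(packed_aggr, raw_devices)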
| 13,752 | 32.874384 | 145 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/distributed.py | # -*- coding: utf-8 -*-
# File: distributed.py
import re
import tensorflow as tf
from ..tfutils.common import get_global_step_var, get_op_tensor_name
from ..utils import logger
from ..utils.argtools import memoized
from .training import DataParallelBuilder, GraphBuilder
from .utils import OverrideCachingDevice, aggregate_grads, override_to_local_variable
__all__ = []
class DistributedBuilderBase(GraphBuilder):
_sync_queue_counter = 0
def __init__(self, server):
self.server = server
server_def = server.server_def
self.cluster = tf.train.ClusterSpec(server_def.cluster)
self.task_index = server_def.task_index
self.num_ps = self.cluster.num_tasks('ps')
self.num_worker = self.cluster.num_tasks('worker')
def _add_sync_queues_and_barrier(self, name, dependencies):
"""Adds ops to enqueue on all worker queues.
Args:
name: prefixed for the shared_name of ops.
dependencies: control dependency from ops.
Returns:
an op that should be used as control dependency before starting next step.
"""
self._sync_queue_counter += 1
with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]):
sync_queues = [
tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]],
shared_name='%s%s' % (name, i))
for i in range(self.num_worker)]
queue_ops = []
# For each other worker, add an entry in a queue, signaling that it can finish this step.
token = tf.constant(False)
with tf.control_dependencies(dependencies):
for i, q in enumerate(sync_queues):
if i != self.task_index:
queue_ops.append(q.enqueue(token))
# Drain tokens off queue for this worker, one for each other worker.
queue_ops.append(
sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))
return tf.group(*queue_ops, name=name)
class DistributedParameterServerBuilder(DataParallelBuilder, DistributedBuilderBase):
"""
Distributed parameter server training.
A single copy of parameters are scattered around PS.
Gradients across GPUs are averaged within the worker, and applied to PS.
Each worker also caches the variables for reading.
It is an equivalent of ``--variable_update=parameter_server`` in
`tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.
However this implementation hasn't been well tested.
It probably still has issues in model saving, etc.
Also, TensorFlow team is not actively maintaining distributed training features.
Check :class:`HorovodTrainer` and
`ResNet-Horovod <https://github.com/tensorpack/benchmarks/tree/master/ResNet-Horovod>`_
for better distributed training support.
Note:
1. Gradients are not averaged across workers, but applied to PS variables
directly (either with or without locking depending on the optimizer).
"""
def __init__(self, towers, server, caching_device):
"""
Args:
towers (list[int]): list of GPU ids.
server (tf.train.Server): the server with ps and workers.
job_name must be 'worker'.
caching_device (str): either 'cpu' or 'gpu'
"""
DataParallelBuilder.__init__(self, towers)
DistributedBuilderBase.__init__(self, server)
assert caching_device in ['cpu', 'gpu'], caching_device
self.caching_device = caching_device
self.is_chief = (self.task_index == 0)
worker_prefix = '/job:worker/task:%s' % self.task_index
self.param_server_device = tf.train.replica_device_setter(
worker_device=worker_prefix + '/cpu:0', cluster=self.cluster)
self.cpu_device = '%s/cpu:0' % worker_prefix
self.raw_devices = ['{}/gpu:{}'.format(worker_prefix, k) for k in self.towers]
self.sync_queue_devices = ['/job:ps/task:%s/cpu:0' % i for i in range(self.num_ps)]
def build(self, get_grad_fn, get_opt_fn):
ps_strategy = tf.contrib.training.GreedyLoadBalancingStrategy(
self.num_ps, tf.contrib.training.byte_size_load_fn)
devices = [
tf.train.replica_device_setter(
worker_device=d,
cluster=self.cluster,
ps_strategy=ps_strategy) for d in self.raw_devices]
if self.caching_device == 'gpu':
caching_devices = self.raw_devices
else:
caching_devices = [self.cpu_device]
custom_getter = OverrideCachingDevice(
caching_devices, self.cpu_device, 1024 * 64)
with tf.variable_scope(tf.get_variable_scope(), custom_getter=custom_getter):
grad_list = DataParallelBuilder.build_on_towers(self.towers, get_grad_fn, devices)
DataParallelBuilder._check_grad_list(grad_list)
with tf.device(self.param_server_device):
grads = aggregate_grads(grad_list, colocation=False)
opt = get_opt_fn()
train_op = opt.apply_gradients(grads, name='train_op')
train_op = self._add_sync_queues_and_barrier('all_workers_sync_barrier', [train_op])
return train_op
class DistributedReplicatedBuilder(DataParallelBuilder, DistributedBuilderBase):
"""
Distributed replicated training.
Each worker process builds the same model on one or more GPUs.
Gradients across GPUs are averaged within the worker,
and get synchronously applied to the global copy of variables located on PS.
Then each worker copy the latest variables from PS back to local.
It is an equivalent of ``--variable_update=distributed_replicated`` in
`tensorflow/benchmarks <https://github.com/tensorflow/benchmarks>`_.
Note that the performance of this trainer is still not satisfactory,
and TensorFlow team is not actively maintaining distributed training features.
Check :class:`HorovodTrainer` and
`ResNet-Horovod <https://github.com/tensorpack/benchmarks/tree/master/ResNet-Horovod>`_
for better distributed training support.
Note:
1. Gradients are not averaged across workers, but applied to PS variables
directly (either with or without locking depending on the optimizer).
2. Some details about collections: all variables created inside tower
will become local variables,
and a clone will be made in global variables for all trainable/model variables.
Example:
.. code-block:: python
# Create the server object like this:
hosts = ['host1.com', 'host2.com']
cluster_spec = tf.train.ClusterSpec({
'ps': [h + ':2222' for h in hosts],
'worker': [h + ':2223' for h in hosts]
})
server = tf.train.Server(
cluster_spec, job_name=args.job, task_index=args.task,
config=get_default_sess_config())
# initialize trainer with this server object
.. code-block:: none
# Start training like this:
(host1)$ ./train.py --job worker --task 0
(host1)$ CUDA_VISIBLE_DEVICES= ./train.py --job ps --task 0
(host2)$ ./train.py --job worker --task 1
(host2)$ CUDA_VISIBLE_DEVICES= ./train.py --job ps --task 1
"""
def __init__(self, towers, server):
"""
Args:
towers (list[int]): list of GPU ids.
server (tf.train.Server): the server with ps and workers.
job_name must be 'worker'.
"""
DataParallelBuilder.__init__(self, towers)
DistributedBuilderBase.__init__(self, server)
self.is_chief = (self.task_index == 0)
worker_prefix = '/job:worker/task:%s' % self.task_index
self.param_server_device = tf.train.replica_device_setter(
worker_device=worker_prefix + '/cpu:0', cluster=self.cluster)
self.nr_gpu = len(self.towers)
self.cpu_device = '%s/cpu:0' % worker_prefix
self.raw_devices = ['%s/gpu:%i' % (worker_prefix, i) for i in towers]
# Device for queues for managing synchronization between servers
self.sync_queue_devices = ['/job:ps/task:%s/cpu:0' % i for i in range(self.num_ps)]
@staticmethod
def _apply_shadow_vars(avg_grads):
"""
Create shadow variables on PS, and replace variables in avg_grads
by these shadow variables.
Args:
avg_grads: list of (grad, var) tuples
"""
ps_var_grads = []
for grad, var in avg_grads:
assert var.name.startswith('tower'), var.name
my_name = '/'.join(var.name.split('/')[1:])
my_name = get_op_tensor_name(my_name)[0]
new_v = tf.get_variable(my_name, dtype=var.dtype.base_dtype,
initializer=var.initial_value,
trainable=True)
# (g, v) to be applied, where v is global (ps vars)
ps_var_grads.append((grad, new_v))
return ps_var_grads
@staticmethod
def _shadow_model_variables(shadow_vars):
"""
Create shadow vars for model_variables as well, and add to the list of ``shadow_vars``.
Returns:
list of (shadow_model_var, local_model_var) used for syncing.
"""
G = tf.get_default_graph()
curr_shadow_vars = {v.name for v in shadow_vars}
model_vars = tf.model_variables()
shadow_model_vars = []
for v in model_vars:
assert v.name.startswith('tower'), "Found some MODEL_VARIABLES created outside of the tower function!"
stripped_op_name, stripped_var_name = get_op_tensor_name(re.sub('^tower[0-9]+/', '', v.name))
if stripped_op_name in curr_shadow_vars:
continue
try:
G.get_tensor_by_name(stripped_var_name)
logger.warn("Model Variable {} also appears in other collections.".format(stripped_var_name))
continue
except KeyError:
pass
new_v = tf.get_variable(stripped_op_name, dtype=v.dtype.base_dtype,
initializer=v.initial_value,
trainable=False)
curr_shadow_vars.add(stripped_op_name) # avoid duplicated shadow_model_vars
shadow_vars.append(new_v)
shadow_model_vars.append((new_v, v)) # only need to sync model_var from one tower
return shadow_model_vars
def build(self, get_grad_fn, get_opt_fn):
"""
Args:
get_grad_fn (-> [(grad, var)]):
get_opt_fn (-> tf.train.Optimizer): callable which returns an optimizer
Returns:
(tf.Operation, tf.Operation, tf.Operation):
1. the training op.
2. the op which sync all the local variables from PS.
This op should be run before training.
3. the op which sync all the local `MODEL_VARIABLES` from PS.
You can choose how often to run it by yourself.
"""
with override_to_local_variable():
get_global_step_var()
get_opt_fn = memoized(get_opt_fn)
# Build the optimizer first, before entering any tower.
# This makes sure that learning_rate is a global variable (what we expect)
get_opt_fn() # TODO get_opt_fn called before main graph was built
# Ngpu * Nvar * 2
grad_list = DataParallelBuilder.build_on_towers(
self.towers, get_grad_fn,
devices=self.raw_devices,
use_vs=[True] * len(self.towers)) # open vs at each tower
DataParallelBuilder._check_grad_list(grad_list)
avg_grads = aggregate_grads(
grad_list, colocation=False, devices=self.raw_devices)
with tf.device(self.param_server_device):
ps_var_grads = DistributedReplicatedBuilder._apply_shadow_vars(avg_grads)
var_update_ops = self._apply_gradients_and_copy(
get_opt_fn(), grad_list, ps_var_grads)
self._shadow_vars = [v for (__, v) in ps_var_grads]
self._shadow_model_vars = DistributedReplicatedBuilder._shadow_model_variables(self._shadow_vars)
# TODO add options to synchronize less
main_fetch = tf.group(*var_update_ops, name='main_fetches')
train_op = self._add_sync_queues_and_barrier(
'post_copy_barrier', [main_fetch])
# initial local_vars syncing
with tf.name_scope('initial_sync_variables'):
initial_sync_op = self._get_initial_sync_op()
if len(self._shadow_model_vars) and self.is_chief:
with tf.name_scope('sync_model_variables'):
model_sync_op = self._get_sync_model_vars_op()
else:
model_sync_op = None
return train_op, initial_sync_op, model_sync_op
def _apply_gradients_and_copy(self, opt, raw_grad_list, ps_var_grads):
"""
Apply averaged gradients to ps vars, and then copy the updated
variables back to each tower.
Args:
raw_grad_list: Ngpu x Nvar x 2 gradient list from all towers
ps_var_grads: Nvar x 2 (grad, ps_var)
Returns:
list of copy ops
"""
# TODO do this for variables together?
with tf.name_scope('apply_gradients'):
var_update_ops = []
for vid, (g, v) in enumerate(ps_var_grads):
# TODO do we put momentum variables into local or global?
apply_gradient_op = opt.apply_gradients([(g, v)])
barrier = self._add_sync_queues_and_barrier(
'param_update_barrier_{}'.format(vid), [apply_gradient_op])
with tf.control_dependencies([barrier]), \
tf.device(self.cpu_device):
updated_value = v.read_value()
for towerid in range(self.nr_gpu):
var_update_ops.append(
raw_grad_list[towerid][vid][1].assign(updated_value))
return var_update_ops
def _get_initial_sync_op(self):
"""
Get the op to copy-initialized all local variables from PS.
"""
def strip_port(s):
if s.endswith(':0'):
return s[:-2]
return s
local_vars = tf.local_variables()
local_var_by_name = {strip_port(v.name): v for v in local_vars}
ops = []
nr_shadow_vars = len(self._shadow_vars)
for v in self._shadow_vars:
vname = strip_port(v.name)
for i in range(self.nr_gpu):
name = 'tower%s/%s' % (i, vname)
assert name in local_var_by_name, \
"Shadow variable {} doesn't match a corresponding local variable!".format(v.name)
copy_to = local_var_by_name[name]
# logger.info("{} -> {}".format(v.name, copy_to.name))
ops.append(copy_to.assign(v.read_value()))
return tf.group(*ops, name='sync_{}_variables_from_ps'.format(nr_shadow_vars))
def _get_sync_model_vars_op(self):
"""
Get the op to sync local model_variables to PS.
"""
ops = []
for (shadow_v, local_v) in self._shadow_model_vars:
ops.append(shadow_v.assign(local_v.read_value()))
assert len(ops)
return tf.group(*ops, name='sync_{}_model_variables_to_ps'.format(len(ops)))
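# A minimal, hedged sketch of consuming the three ops returned by build():
# run `initial_sync_op` once before training, `train_op` every step, and
# `model_sync_op` (chief only, may be None) whenever local MODEL_VARIABLES
# should be pushed back to the parameter servers.  `get_grad_fn`/`get_opt_fn`
# are assumptions standing in for user code.
def _example_distributed_replicated(towers, server, get_grad_fn, get_opt_fn):
    builder = DistributedReplicatedBuilder(towers, server)
    return builder.build(get_grad_fn, get_opt_fn)  # (train_op, initial_sync_op, model_sync_op)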
| 15,716 | 41.25 | 114 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/__init__.py | # -*- coding: utf-8 -*-
# File: __init__.py
# https://github.com/celery/kombu/blob/7d13f9b95d0b50c94393b962e6def928511bfda6/kombu/__init__.py#L34-L36
STATICA_HACK = True
globals()['kcah_acitats'[::-1].upper()] = False
if STATICA_HACK:
from .model_desc import *
from .training import *
from .distributed import *
from .utils import *
from .model_desc import ModelDesc, ModelDescBase
from pkgutil import iter_modules
import os
import os.path
__all__ = []
def global_import(name):
p = __import__(name, globals(), locals(), level=1)
lst = p.__all__ if '__all__' in dir(p) else []
del globals()[name]
for k in lst:
if not k.startswith('__'):
globals()[k] = p.__dict__[k]
__all__.append(k)
_CURR_DIR = os.path.dirname(__file__)
_SKIP = ['distributed']
for _, module_name, _ in iter_modules(
[_CURR_DIR]):
srcpath = os.path.join(_CURR_DIR, module_name + '.py')
if not os.path.isfile(srcpath):
continue
if module_name.startswith('_'):
continue
if module_name not in _SKIP:
global_import(module_name)
| 1,113 | 25.52381 | 105 | py |
SyNet | SyNet-master/tensorpack/tensorpack/graph_builder/model_desc.py | # -*- coding: utf-8 -*-
# File: model_desc.py
from ..train.model_desc import ModelDesc, ModelDescBase # kept for BC # noqa
__all__ = []
| 141 | 14.777778 | 77 | py |
pytorch-playground | pytorch-playground-master/setup.py | from setuptools import setup, find_packages
with open("requirements.txt") as requirements_file:
REQUIREMENTS = requirements_file.readlines()
setup(
name="pytorch-playground",
version="1.0.0",
author='Aaron Chen',
author_email='[email protected]',
packages=find_packages(),
entry_points = {
'console_scripts': [
'quantize=quantize:main',
]
},
install_requires=REQUIREMENTS,
)
| 447 | 21.4 | 51 | py |
pytorch-playground | pytorch-playground-master/quantize.py | import argparse
from utee import misc, quant, selector
import torch
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from collections import OrderedDict
def main():
parser = argparse.ArgumentParser(description='PyTorch SVHN Example')
parser.add_argument('--type', default='cifar10', help='|'.join(selector.known_models))
parser.add_argument('--quant_method', default='linear', help='linear|minmax|log|tanh')
parser.add_argument('--batch_size', type=int, default=100, help='input batch size for training (default: 64)')
parser.add_argument('--gpu', default=None, help='index of gpus to use')
parser.add_argument('--ngpu', type=int, default=8, help='number of gpus to use')
parser.add_argument('--seed', type=int, default=117, help='random seed (default: 1)')
parser.add_argument('--model_root', default='~/.torch/models/', help='folder to save the model')
parser.add_argument('--data_root', default='/data/public_dataset/pytorch/', help='folder to save the model')
parser.add_argument('--logdir', default='log/default', help='folder to save to the log')
parser.add_argument('--input_size', type=int, default=224, help='input size of image')
parser.add_argument('--n_sample', type=int, default=20, help='number of samples to infer the scaling factor')
parser.add_argument('--param_bits', type=int, default=8, help='bit-width for parameters')
parser.add_argument('--bn_bits', type=int, default=32, help='bit-width for running mean and std')
parser.add_argument('--fwd_bits', type=int, default=8, help='bit-width for layer output')
parser.add_argument('--overflow_rate', type=float, default=0.0, help='overflow rate')
args = parser.parse_args()
args.gpu = misc.auto_select_gpu(utility_bound=0, num_gpu=args.ngpu, selected_gpus=args.gpu)
args.ngpu = len(args.gpu)
misc.ensure_dir(args.logdir)
args.model_root = misc.expand_user(args.model_root)
args.data_root = misc.expand_user(args.data_root)
args.input_size = 299 if 'inception' in args.type else args.input_size
assert args.quant_method in ['linear', 'minmax', 'log', 'tanh']
print("=================FLAGS==================")
for k, v in args.__dict__.items():
print('{}: {}'.format(k, v))
print("========================================")
assert torch.cuda.is_available(), 'no cuda'
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
# load model and dataset fetcher
model_raw, ds_fetcher, is_imagenet = selector.select(args.type, model_root=args.model_root)
args.ngpu = args.ngpu if is_imagenet else 1
# quantize parameters
if args.param_bits < 32:
state_dict = model_raw.state_dict()
state_dict_quant = OrderedDict()
sf_dict = OrderedDict()
for k, v in state_dict.items():
if 'running' in k:
                if args.bn_bits >= 32:
print("Ignoring {}".format(k))
state_dict_quant[k] = v
continue
else:
bits = args.bn_bits
else:
bits = args.param_bits
if args.quant_method == 'linear':
sf = bits - 1. - quant.compute_integral_part(v, overflow_rate=args.overflow_rate)
v_quant = quant.linear_quantize(v, sf, bits=bits)
elif args.quant_method == 'log':
v_quant = quant.log_minmax_quantize(v, bits=bits)
elif args.quant_method == 'minmax':
v_quant = quant.min_max_quantize(v, bits=bits)
else:
v_quant = quant.tanh_quantize(v, bits=bits)
state_dict_quant[k] = v_quant
print(k, bits)
model_raw.load_state_dict(state_dict_quant)
# quantize forward activation
if args.fwd_bits < 32:
model_raw = quant.duplicate_model_with_quant(model_raw, bits=args.fwd_bits, overflow_rate=args.overflow_rate,
counter=args.n_sample, type=args.quant_method)
print(model_raw)
val_ds_tmp = ds_fetcher(10, data_root=args.data_root, train=False, input_size=args.input_size)
misc.eval_model(model_raw, val_ds_tmp, ngpu=1, n_sample=args.n_sample, is_imagenet=is_imagenet)
# eval model
val_ds = ds_fetcher(args.batch_size, data_root=args.data_root, train=False, input_size=args.input_size)
acc1, acc5 = misc.eval_model(model_raw, val_ds, ngpu=args.ngpu, is_imagenet=is_imagenet)
# print sf
print(model_raw)
res_str = "type={}, quant_method={}, param_bits={}, bn_bits={}, fwd_bits={}, overflow_rate={}, acc1={:.4f}, acc5={:.4f}".format(
args.type, args.quant_method, args.param_bits, args.bn_bits, args.fwd_bits, args.overflow_rate, acc1, acc5)
print(res_str)
with open('acc1_acc5.txt', 'a') as f:
f.write(res_str + '\n')
if __name__ == '__main__':
main()
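# Hedged note on the linear path above: the fractional bit count is chosen as
#   sf = param_bits - 1 - compute_integral_part(v, overflow_rate)
# i.e. enough integer bits are reserved so that (presumably) at most
# `overflow_rate` of |v| overflows, and the remaining sf bits give a
# quantization step of 2 ** -sf.  The exact definitions live in utee/quant.py,
# which is not shown here.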
| 4,928 | 48.29 | 132 | py |
pytorch-playground | pytorch-playground-master/svhn/model.py | import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import os
from collections import OrderedDict
from utee import misc
print = misc.logger.info
model_urls = {
'svhn': 'http://ml.cs.tsinghua.edu.cn/~chenxi/pytorch-models/svhn-f564f3d8.pth',
}
class SVHN(nn.Module):
def __init__(self, features, n_channel, num_classes):
super(SVHN, self).__init__()
assert isinstance(features, nn.Sequential), type(features)
self.features = features
self.classifier = nn.Sequential(
nn.Linear(n_channel, num_classes)
)
print(self.features)
print(self.classifier)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for i, v in enumerate(cfg):
if v == 'M':
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
padding = v[1] if isinstance(v, tuple) else 1
out_channels = v[0] if isinstance(v, tuple) else v
conv2d = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=padding)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(out_channels, affine=False), nn.ReLU(), nn.Dropout(0.3)]
else:
layers += [conv2d, nn.ReLU(), nn.Dropout(0.3)]
in_channels = out_channels
return nn.Sequential(*layers)
def svhn(n_channel, pretrained=None):
cfg = [n_channel, n_channel, 'M', 2*n_channel, 2*n_channel, 'M', 4*n_channel, 4*n_channel, 'M', (8*n_channel, 0), 'M']
layers = make_layers(cfg, batch_norm=True)
model = SVHN(layers, n_channel=8*n_channel, num_classes=10)
if pretrained is not None:
m = model_zoo.load_url(model_urls['svhn'])
state_dict = m.state_dict() if isinstance(m, nn.Module) else m
assert isinstance(state_dict, (dict, OrderedDict)), type(state_dict)
model.load_state_dict(state_dict)
return model
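# A minimal, hedged usage sketch.  In `cfg`, 'M' becomes a 2x2 max-pool and the
# tuple (8*n_channel, 0) a 3x3 conv with padding 0; `n_channel` scales every
# layer, and the checkpoint in model_urls only loads if the same width is used
# (n_channel=32 below is an assumption about that checkpoint).
def _example_pretrained_svhn():
    return svhn(n_channel=32, pretrained=True)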
| 2,056 | 33.864407 | 122 | py |
Subsets and Splits