from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestBatchSparseToDense(serial.SerializedTestCase):
@given(
batch_size=st.integers(5, 10),
dense_last_dim=st.integers(5, 10),
default_value=st.floats(min_value=2.0, max_value=3.0),
**hu.gcs
)
@settings(deadline=None)
def test_batch_sparse_to_dense(
self, batch_size, dense_last_dim, default_value, gc, dc
):
L = np.random.randint(1, dense_last_dim + 1, size=(batch_size))
num_data = L.sum()
# The following logic ensures that indices within each batch are not duplicated
I = np.array([]).astype(np.int32)
for l in L:
I_l = np.random.choice(dense_last_dim, l, replace=False)
I = np.concatenate((I, I_l))
V = np.random.rand(num_data).astype(np.float32)
op = core.CreateOperator(
'BatchSparseToDense',
['L', 'I', 'V'],
['O'],
dense_last_dim=dense_last_dim,
default_value=default_value,
)
S = np.random.rand(batch_size, dense_last_dim).astype(np.float32)
op2 = core.CreateOperator(
'BatchSparseToDense',
['L', 'I', 'V', 'S'],
['O'],
default_value=default_value,
)
def batch_sparse_to_dense_ref(L, I, V, S=None):
if S is None:
ret = np.zeros((batch_size, dense_last_dim))
else:
ret = np.zeros(S.shape)
ret.fill(default_value)
batch = 0
v_idx = 0
for length in L:
for _ in range(length):
ret[batch][I[v_idx]] = V[v_idx]
v_idx += 1
batch += 1
return [ret]
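# Illustrative example (values invented for clarity, not drawn by the test):
# with L = [2, 1], I = [0, 2, 1], V = [7., 8., 9.], dense_last_dim = 3 and
# default_value = 0., BatchSparseToDense produces
#   O = [[7., 0., 8.],
#        [0., 9., 0.]]
# i.e. row b receives its L[b] (index, value) pairs scattered into a dense row.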
self.assertDeviceChecks(dc, op, [L, I, V], [0])
self.assertReferenceChecks(gc, op, [L, I, V], batch_sparse_to_dense_ref)
self.assertGradientChecks(gc, op, [L, I, V], 2, [0])
self.assertDeviceChecks(dc, op2, [L, I, V, S], [0])
self.assertReferenceChecks(gc, op2, [L, I, V, S], batch_sparse_to_dense_ref)
self.assertGradientChecks(gc, op2, [L, I, V, S], 2, [0])
self.assertDeviceChecks(dc, op, [L.astype(np.int32), I, V], [0])
self.assertReferenceChecks(gc, op, [L.astype(np.int32), I, V], batch_sparse_to_dense_ref)
self.assertGradientChecks(gc, op, [L.astype(np.int32), I, V], 2, [0])
@given(
batch_size=st.integers(5, 10),
dense_last_dim=st.integers(5, 10),
**hu.gcs
)
@settings(deadline=None)
def test_batch_dense_to_sparse(self, batch_size, dense_last_dim, gc, dc):
L = np.random.randint(1, dense_last_dim + 1, size=(batch_size))
# The following logic ensures that indices within each batch are not duplicated
I = np.array([]).astype(np.int32)
for l in L:
I_l = np.random.choice(dense_last_dim, l, replace=False)
I = np.concatenate((I, I_l))
D = np.random.rand(batch_size, dense_last_dim).astype(np.float32)
op = core.CreateOperator(
'BatchDenseToSparse',
['L', 'I', 'D'],
['V'],
)
def batch_dense_to_sparse_ref(L, I, D):
ret = np.zeros(I.shape)
batch = 0
i_idx = 0
for length in L:
for _ in range(length):
ret[i_idx] = D[batch][I[i_idx]]
i_idx += 1
batch += 1
return [ret]
print(L, I, D)
self.assertDeviceChecks(dc, op, [L, I, D], [0])
self.assertReferenceChecks(gc, op, [L, I, D], batch_dense_to_sparse_ref)
self.assertGradientChecks(gc, op, [L, I, D], 2, [0])
self.assertDeviceChecks(dc, op, [L.astype(np.int32), I, D], [0])
self.assertReferenceChecks(gc, op, [L.astype(np.int32), I, D], batch_dense_to_sparse_ref)
self.assertGradientChecks(gc, op, [L.astype(np.int32), I, D], 2, [0])
# === end of file: caffe2/python/operator_test/batch_sparse_to_dense_op_test.py (pytorch-master) ===
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestAffineChannelOp(serial.SerializedTestCase):
def affine_channel_nchw_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
scale = scale.reshape(C, 1)
bias = bias.reshape(C, 1)
Y = X * scale + bias
return [Y.reshape(dims)]
def affine_channel_nhwc_ref(self, X, scale, bias):
dims = X.shape
N = dims[0]
C = dims[-1]
X = X.reshape(N, -1, C)
Y = X * scale + bias
return [Y.reshape(dims)]
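# Worked example (illustrative values, not drawn by the tests): AffineChannel is a
# per-channel affine map, Y[n, c, ...] = X[n, c, ...] * scale[c] + bias[c] in NCHW
# (the NHWC reference broadcasts the same scale/bias over the trailing channel
# axis). With scale = [2., 3.] and bias = [1., 0.], a pixel with channel values
# (5., 5.) maps to (5*2+1, 5*3+0) = (11., 15.).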
@serial.given(N=st.integers(1, 5), C=st.integers(1, 5),
H=st.integers(1, 5), W=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]), is_learnable=st.booleans(),
in_place=st.booleans(), **hu.gcs)
def test_affine_channel_2d(
self, N, C, H, W, order, is_learnable, in_place, gc, dc):
op = core.CreateOperator(
"AffineChannel",
["X", "scale", "bias"],
["X"] if in_place and not is_learnable else ["Y"],
order=order,
is_learnable=is_learnable,
)
if order == "NCHW":
X = np.random.randn(N, C, H, W).astype(np.float32)
else:
X = np.random.randn(N, H, W, C).astype(np.float32)
scale = np.random.randn(C).astype(np.float32)
bias = np.random.randn(C).astype(np.float32)
inputs = [X, scale, bias]
def ref_op(X, scale, bias):
if order == "NCHW":
return self.affine_channel_nchw_ref(X, scale, bias)
else:
return self.affine_channel_nhwc_ref(X, scale, bias)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=ref_op,
)
self.assertDeviceChecks(dc, op, inputs, [0])
num_grad = len(inputs) if is_learnable else 1
for i in range(num_grad):
self.assertGradientChecks(gc, op, inputs, i, [0])
@given(N=st.integers(1, 5), C=st.integers(1, 5), T=st.integers(1, 3),
H=st.integers(1, 3), W=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]), is_learnable=st.booleans(),
in_place=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_affine_channel_3d(
self, N, C, T, H, W, order, is_learnable, in_place, gc, dc):
op = core.CreateOperator(
"AffineChannel",
["X", "scale", "bias"],
["X"] if in_place and not is_learnable else ["Y"],
order=order,
is_learnable=is_learnable,
)
if order == "NCHW":
X = np.random.randn(N, C, T, H, W).astype(np.float32)
else:
X = np.random.randn(N, T, H, W, C).astype(np.float32)
scale = np.random.randn(C).astype(np.float32)
bias = np.random.randn(C).astype(np.float32)
inputs = [X, scale, bias]
def ref_op(X, scale, bias):
if order == "NCHW":
return self.affine_channel_nchw_ref(X, scale, bias)
else:
return self.affine_channel_nhwc_ref(X, scale, bias)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=ref_op,
)
self.assertDeviceChecks(dc, op, inputs, [0])
num_grad = len(inputs) if is_learnable else 1
for i in range(num_grad):
self.assertGradientChecks(gc, op, inputs, i, [0])
# === end of file: caffe2/python/operator_test/affine_channel_op_test.py (pytorch-master) ===
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
class ChannelShuffleOpsTest(serial.SerializedTestCase):
def _channel_shuffle_nchw_ref(self, X, group):
dims = X.shape
N = dims[0]
C = dims[1]
G = group
K = int(C / G)
X = X.reshape(N, G, K, np.prod(dims[2:]))
Y = np.transpose(X, axes=(0, 2, 1, 3))
return [Y.reshape(dims)]
def _channel_shuffle_nhwc_ref(self, X, group):
dims = X.shape
N = dims[0]
C = dims[-1]
G = group
K = int(C / G)
X = X.reshape(N, np.prod(dims[1:-1]), G, K)
Y = np.transpose(X, axes=(0, 1, 3, 2))
return [Y.reshape(dims)]
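# Worked example (illustrative, not drawn by the test): with C = 6 channels and
# G = 2 groups (K = 3), ChannelShuffle reorders channels [0, 1, 2, 3, 4, 5] into
# [0, 3, 1, 4, 2, 5]: reshape the channel axis to (G, K) = [[0, 1, 2], [3, 4, 5]],
# transpose to (K, G), then flatten.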
@serial.given(
N=st.integers(0, 5),
G=st.integers(1, 5),
K=st.integers(1, 5),
H=st.integers(1, 5),
W=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
**hu.gcs
)
def test_channel_shuffle(self, N, G, K, H, W, order, gc, dc):
C = G * K
if order == "NCHW":
X = np.random.randn(N, C, H, W).astype(np.float32)
else:
X = np.random.randn(N, H, W, C).astype(np.float32)
op = core.CreateOperator("ChannelShuffle", ["X"], ["Y"], group=G, order=order)
def channel_shuffle_ref(X):
if order == "NCHW":
return self._channel_shuffle_nchw_ref(X, G)
else:
return self._channel_shuffle_nhwc_ref(X, G)
self.assertReferenceChecks(gc, op, [X], channel_shuffle_ref)
self.assertGradientChecks(gc, op, [X], 0, [0])
self.assertDeviceChecks(dc, op, [X], [0])
# === end of file: caffe2/python/operator_test/channel_shuffle_test.py (pytorch-master) ===
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestPairWiseLossOps(serial.SerializedTestCase):
@given(X=hu.arrays(dims=[2, 1],
elements=hu.floats(min_value=0.0, max_value=10.0)),
label=hu.arrays(dims=[2, 1],
elements=st.integers(min_value=0, max_value=1),
dtype=np.float32),
**hu.gcs_cpu_only)
def test_pair_wise_loss_predictions(self, X, label, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('label', label)
new_label = np.array([label[1], label[0]])
new_x = np.array([X[1], X[0]])
workspace.FeedBlob('new_x', new_x)
workspace.FeedBlob('new_label', new_label)
net = core.Net('net')
net.PairWiseLoss(['X', 'label'], ['output'])
net.PairWiseLoss(['new_x', 'new_label'], ['new_output'])
plan = core.Plan('predict_data')
plan.AddStep(core.execution_step('predict_data',
[net], num_iter=1))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
new_output = workspace.FetchBlob('new_output')
sign = 1 if label[0] > label[1] else -1
if label[0] == label[1]:
self.assertEqual(np.asscalar(output), 0)
return
self.assertAlmostEqual(
np.asscalar(output),
np.asscalar(np.log(1 + np.exp(sign * (X[1] - X[0])))),
delta=1e-4
)
# check swapping row order doesn't alter overall loss
self.assertAlmostEqual(output, new_output)
@given(X=hu.arrays(dims=[2, 1],
elements=hu.floats(min_value=0.0, max_value=10.0)),
label=hu.arrays(dims=[2, 1],
elements=st.integers(min_value=0, max_value=1),
dtype=np.float32),
dY=hu.arrays(dims=[1],
elements=hu.floats(min_value=1, max_value=10)),
**hu.gcs_cpu_only)
def test_pair_wise_loss_gradient(self, X, label, dY, gc, dc):
workspace.FeedBlob('X', X)
workspace.FeedBlob('dY', dY)
workspace.FeedBlob('label', label)
net = core.Net('net')
net.PairWiseLossGradient(
['X', 'label', 'dY'],
['dX'],
)
plan = core.Plan('predict_data')
plan.AddStep(core.execution_step('predict_data',
[net], num_iter=1))
workspace.RunPlan(plan)
dx = workspace.FetchBlob('dX')
sign = 1 if label[0] > label[1] else -1
if label[0] == label[1]:
self.assertEqual(np.asscalar(dx[0]), 0)
return
self.assertAlmostEqual(
np.asscalar(dx[0]),
np.asscalar(-dY[0] * sign / (1 + np.exp(sign * (X[0] - X[1])))),
delta=1e-2 * abs(np.asscalar(dx[0])))
self.assertEqual(np.asscalar(dx[0]), np.asscalar(-dx[1]))
delta = 1e-3
up_x = np.array([[X[0] + delta], [X[1]]], dtype=np.float32)
down_x = np.array([[X[0] - delta], [X[1]]], dtype=np.float32)
workspace.FeedBlob('up_x', up_x)
workspace.FeedBlob('down_x', down_x)
new_net = core.Net('new_net')
new_net.PairWiseLoss(['up_x', 'label'], ['up_output'])
new_net.PairWiseLoss(['down_x', 'label'], ['down_output'])
plan = core.Plan('predict_data')
plan.AddStep(core.execution_step('predict_data', [new_net], num_iter=1))
workspace.RunPlan(plan)
down_output_pred = workspace.FetchBlob('down_output')
up_output_pred = workspace.FetchBlob('up_output')
np.testing.assert_allclose(
np.asscalar(dx[0]),
np.asscalar(
0.5 * dY[0] *
(up_output_pred[0] - down_output_pred[0]) / delta),
rtol=1e-2, atol=1e-2)
@serial.given(n=st.integers(0, 10), k=st.integers(1, 5), **hu.gcs_cpu_only)
def test_pair_wise_loss_batch(self, n, k, gc, dc):
lengths = np.random.randint(k, size=n).astype(np.int32) + 1
X = np.random.rand(sum(lengths)).astype(np.float32)
label = np.random.randint(k, size=sum(lengths)).astype(np.float32)
def pair_wise_op(X, label, lengths):
N = lengths.size
output = np.zeros(N).astype(np.float32)
def f(x):
return np.log(1 + np.exp(x))
offset = 0
for idx in range(N):
offset += lengths[idx - 1] if idx > 0 else 0
count = 0
for i in range(offset, offset + lengths[idx]):
for j in range(offset, i):
if label[i] == label[j]:
continue
sign = 1 if label[i] > label[j] else -1
output[idx] += f(sign * (X[j] - X[i]))
count += 1
if count > 0:
output[idx] /= count
return [output]
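# Illustrative example (values invented for clarity): for a single session with
# lengths = [2], scores X = [x0, x1] and labels [1, 0], the only valid pair
# contributes log(1 + exp(x1 - x0)); the session loss is that value averaged over
# its one counted pair, and pairs with equal labels are skipped.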
op = core.CreateOperator(
'PairWiseLoss',
['X', 'label', 'lengths'],
'out'
)
# Check against numpy reference
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label, lengths],
reference=pair_wise_op,
)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X, label, lengths], [0])
# Gradient check
self.assertGradientChecks(gc, op, [X, label, lengths], 0, [0])
# === end of file: caffe2/python/operator_test/rank_loss_operator_test.py (pytorch-master) ===
try:
import cv2
except ImportError:
pass # skip if opencv is not available
import numpy as np
# === copied from utils/keypoints.py as reference ===
_NUM_KEYPOINTS = -1 # cfg.KRCNN.NUM_KEYPOINTS
_INFERENCE_MIN_SIZE = 0 # cfg.KRCNN.INFERENCE_MIN_SIZE
def heatmaps_to_keypoints(maps, rois):
"""Extracts predicted keypoint locations from heatmaps. Output has shape
(#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob)
for each keypoint.
"""
# This function converts a discrete image coordinate in a HEATMAP_SIZE x
# HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain
# consistency with keypoints_to_heatmap_labels by using the conversion from
# Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a
# continuous coordinate.
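# For example (illustrative): the discrete bin d = 3 maps to c = 3.5, the bin
# center, before being scaled by width_correction / height_correction below.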
offset_x = rois[:, 0]
offset_y = rois[:, 1]
widths = rois[:, 2] - rois[:, 0]
heights = rois[:, 3] - rois[:, 1]
widths = np.maximum(widths, 1)
heights = np.maximum(heights, 1)
widths_ceil = np.ceil(widths).astype(int)
heights_ceil = np.ceil(heights).astype(int)
num_keypoints = np.maximum(maps.shape[1], _NUM_KEYPOINTS)
# NCHW to NHWC for use with OpenCV
maps = np.transpose(maps, [0, 2, 3, 1])
min_size = _INFERENCE_MIN_SIZE
xy_preds = np.zeros(
(len(rois), 4, num_keypoints), dtype=np.float32)
for i in range(len(rois)):
if min_size > 0:
roi_map_width = int(np.maximum(widths_ceil[i], min_size))
roi_map_height = int(np.maximum(heights_ceil[i], min_size))
else:
roi_map_width = widths_ceil[i]
roi_map_height = heights_ceil[i]
width_correction = widths[i] / roi_map_width
height_correction = heights[i] / roi_map_height
roi_map = cv2.resize(
maps[i], (roi_map_width, roi_map_height),
interpolation=cv2.INTER_CUBIC)
# Bring back to CHW
roi_map = np.transpose(roi_map, [2, 0, 1])
roi_map_probs = scores_to_probs(roi_map.copy())
w = roi_map.shape[2]
for k in range(num_keypoints):
pos = roi_map[k, :, :].argmax()
x_int = pos % w
y_int = (pos - x_int) // w
assert (roi_map_probs[k, y_int, x_int] ==
roi_map_probs[k, :, :].max())
x = (x_int + 0.5) * width_correction
y = (y_int + 0.5) * height_correction
xy_preds[i, 0, k] = x + offset_x[i]
xy_preds[i, 1, k] = y + offset_y[i]
xy_preds[i, 2, k] = roi_map[k, y_int, x_int]
xy_preds[i, 3, k] = roi_map_probs[k, y_int, x_int]
return xy_preds
def scores_to_probs(scores):
"""Transforms CxHxW of scores to probabilities spatially."""
channels = scores.shape[0]
for c in range(channels):
temp = scores[c, :, :]
max_score = temp.max()
temp = np.exp(temp - max_score) / np.sum(np.exp(temp - max_score))
scores[c, :, :] = temp
return scores
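# Minimal usage sketch (illustrative, not part of the original module; helper name
# hypothetical): scores_to_probs applies a spatial softmax independently to each
# channel, so every HxW slice of the result sums to 1.
def _scores_to_probs_sketch():
    scores = np.arange(12, dtype=np.float32).reshape(1, 3, 4)  # one 3x4 channel
    probs = scores_to_probs(scores.copy())
    return bool(np.isclose(probs[0].sum(), 1.0))  # True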
def approx_heatmap_keypoint(heatmaps_in, bboxes_in):
'''
Mask R-CNN uses bicubic upscaling before taking the maximum of the heat map
for keypoints. We use bilinear upscaling instead, which means we can
approximate the maximum coordinate from the low-resolution maximum
coordinates; we would like to avoid bicubic upscaling because it is
computationally expensive. Brown and Lowe (Invariant Features from Interest
Point Groups, 2002) fit a 3D quadratic function to the local sample points to
determine the interpolated location of the maximum of scale space, and their
experiments showed that this provides a substantial improvement to matching
and stability for keypoint extraction. That approach uses the Taylor expansion
(up to the quadratic terms) of the scale-space function and is equivalent to
one step of Newton's method. The same refinement is used in many keypoint
estimation algorithms such as SIFT and SURF.
Implementing the Newton step numerically is straightforward, though it
requires a small linear solve.
'''
assert len(bboxes_in.shape) == 2
N = bboxes_in.shape[0]
assert bboxes_in.shape[1] == 4
assert len(heatmaps_in.shape) == 4
assert heatmaps_in.shape[0] == N
keypoint_count = heatmaps_in.shape[1]
heatmap_size = heatmaps_in.shape[2]
assert heatmap_size >= 2
assert heatmaps_in.shape[3] == heatmap_size
keypoints_out = np.zeros((N, keypoint_count, 4))
for k in range(N):
x0, y0, x1, y1 = bboxes_in[k, :]
xLen = np.maximum(x1 - x0, 1)
yLen = np.maximum(y1 - y0, 1)
softmax_map = scores_to_probs(heatmaps_in[k, :, :, :].copy())
f = heatmaps_in[k]
for j in range(keypoint_count):
f = heatmaps_in[k][j]
maxX = -1
maxY = -1
maxScore = -100.0
maxProb = -100.0
for y in range(heatmap_size):
for x in range(heatmap_size):
score = f[y, x]
prob = softmax_map[j, y, x]
if maxX < 0 or maxScore < score:
maxScore = score
maxProb = prob
maxX = x
maxY = y
# print(maxScore, maxX, maxY)
# initialize fmax values of 3x3 grid
# when the 3x3 grid goes out of bounds, mirror around the center
fmax = [[0] * 3 for r in range(3)]
for x in range(3):
for y in range(3):
hm_x = x + maxX - 1
hm_y = y + maxY - 1
hm_x = hm_x - 2 * (hm_x >= heatmap_size) + 2 * (hm_x < 0)
hm_y = hm_y - 2 * (hm_y >= heatmap_size) + 2 * (hm_y < 0)
assert((hm_x < heatmap_size) and (hm_x >= 0))
assert((hm_y < heatmap_size) and (hm_y >= 0))
fmax[y][x] = f[hm_y][hm_x]
# print("python fmax ", fmax)
# b = -f'(0), A = f''(0) Hessian matrix
b = [-(fmax[1][2] - fmax[1][0]) / 2,
     -(fmax[2][1] - fmax[0][1]) / 2]
A = [[fmax[1][0] - 2 * fmax[1][1] + fmax[1][2],
(fmax[2][2] - fmax[2][0] - fmax[0][2] + fmax[0][0]) / 4],
[(fmax[2][2] - fmax[2][0] - fmax[0][2] + fmax[0][0]) / 4,
fmax[0][1] - 2 * fmax[1][1] + fmax[2][1]]]
# print("python A")
# print(A)
# solve Ax=b
div = A[1][1] * A[0][0] - A[0][1] * A[1][0]
if abs(div) < 0.0001:
deltaX = 0
deltaY = 0
deltaScore = maxScore
else:
deltaY = (b[1] * A[0][0] - b[0] * A[1][0]) / div
deltaX = (b[0] * A[1][1] - b[1] * A[0][1]) / div
# clip delta if going out-of-range of 3x3 grid
if abs(deltaX) > 1.5 or abs(deltaY) > 1.5:
scale = 1.5 / max(abs(deltaX), abs(deltaY))
deltaX *= scale
deltaY *= scale
# score = f(0) + f'(0)*x + 1/2 * f''(0) * x^2
# = f(0) - b*x + 1/2*x*A*x
deltaScore = (
fmax[1][1] - (b[0] * deltaX + b[1] * deltaY) +
0.5 * (deltaX * deltaX * A[0][0] +
deltaX * deltaY * A[1][0] +
deltaY * deltaX * A[0][1] +
deltaY * deltaY * A[1][1]))
assert abs(deltaX) <= 1.5
assert abs(deltaY) <= 1.5
# final coordinates
keypoints_out[k, j, :] = (
x0 + (maxX + deltaX + .5) * xLen / heatmap_size,
y0 + (maxY + deltaY + .5) * yLen / heatmap_size,
deltaScore,
maxProb,
)
keypoints_out = np.transpose(keypoints_out, [0, 2, 1])
return keypoints_out
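# Illustrative sketch (not part of the original module): the quadratic peak
# refinement performed inline above, isolated for a single 3x3 patch `f` centered
# on the argmax. As in the docstring, it solves A * delta = b with b = -f'(0) and
# A the finite-difference Hessian of f at the center. The helper name and the
# singularity threshold are illustrative choices, not Detectron API.
def _refine_peak_quadratic_sketch(f):
    b = np.array([
        -(f[1][2] - f[1][0]) / 2.0,
        -(f[2][1] - f[0][1]) / 2.0,
    ])
    A = np.array([
        [f[1][0] - 2 * f[1][1] + f[1][2],
         (f[2][2] - f[2][0] - f[0][2] + f[0][0]) / 4.0],
        [(f[2][2] - f[2][0] - f[0][2] + f[0][0]) / 4.0,
         f[0][1] - 2 * f[1][1] + f[2][1]],
    ])
    if abs(np.linalg.det(A)) < 1e-4:
        return 0.0, 0.0  # nearly singular: keep the integer argmax
    delta_x, delta_y = np.linalg.solve(A, b)
    return delta_x, delta_y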
# === end of file: caffe2/python/operator_test/detectron_keypoints.py (pytorch-master) ===
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
from functools import partial
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
def _unique_ref(x, return_inverse):
ret = np.unique(x, return_inverse=return_inverse)
if not return_inverse:
ret = [ret]
return ret
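# Minimal usage sketch (illustrative, not part of the original test; helper name
# hypothetical): for x = [3, 1, 3], np.unique gives U = [1, 3] and, with
# return_inverse=True, the remapping [1, 0, 1] such that U[remap] reconstructs x.
def _unique_ref_sketch():
    u, remap = _unique_ref(np.array([3, 1, 3], dtype=np.int32), return_inverse=True)
    return u.tolist() == [1, 3] and remap.tolist() == [1, 0, 1]  # True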
class TestUniqueOps(serial.SerializedTestCase):
@given(
X=hu.tensor1d(
# allow empty
min_len=0,
dtype=np.int32,
# allow negatives
elements=st.integers(min_value=-10, max_value=10)),
return_remapping=st.booleans(),
**hu.gcs_no_hip
)
@settings(deadline=10000)
def test_unique_op(self, X, return_remapping, gc, dc):
# The Unique op implementation does not guarantee output order, so sort the
# input so that different implementations return the same outputs.
X = np.sort(X)
op = core.CreateOperator(
"Unique",
['X'],
["U", "remap"] if return_remapping else ["U"],
)
self.assertDeviceChecks(
device_options=dc,
op=op,
inputs=[X],
outputs_to_check=[0, 1] if return_remapping else [0]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=partial(_unique_ref, return_inverse=return_remapping),
)
if __name__ == "__main__":
import unittest
unittest.main()
# === end of file: caffe2/python/operator_test/unique_ops_test.py (pytorch-master) ===
import unittest
try:
import cv2
import lmdb
except ImportError:
pass # Handled below
from PIL import Image
import numpy as np
import shutil
import io
import sys
import tempfile
# TODO: This test does not test scaling because
# the algorithms used by OpenCV in the C and Python
# versions seem to differ slightly. It does test
# most other features.
from hypothesis import given, settings, Verbosity
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import workspace, core
# Verification routines (applies transformations to image to
# verify if the operator produces same result)
def verify_apply_bounding_box(img, box):
import skimage.util
if any(type(box[f]) is not int or np.isnan(box[f]) or box[f] < 0
for f in range(0, 4)):
return img
# Box is ymin, xmin, bound_height, bound_width
y_bounds = (box[0], img.shape[0] - box[0] - box[2])
x_bounds = (box[1], img.shape[1] - box[1] - box[3])
c_bounds = (0, 0)
if any(el < 0 for el in list(y_bounds) + list(x_bounds) + list(c_bounds)):
return img
bboxed = skimage.util.crop(img, (y_bounds, x_bounds, c_bounds))
return bboxed
# This function never rescales in practice; it only trips the `assert False`
# below if the arguments are wrong (i.e. the example would require rescaling).
def verify_rescale(img, minsize):
# Here we use OpenCV transformation to match the C code
scale_amount = float(minsize) / min(img.shape[0], img.shape[1])
if scale_amount <= 1.0:
return img
print("Scale amount is %f -- should be < 1.0; got shape %s" %
(scale_amount, str(img.shape)))
assert False
img_cv = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
output_shape = (int(np.ceil(scale_amount * img_cv.shape[0])),
int(np.ceil(scale_amount * img_cv.shape[1])))
resized = cv2.resize(img_cv,
dsize=output_shape,
interpolation=cv2.INTER_AREA)
resized = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
assert resized.shape[0] >= minsize
assert resized.shape[1] >= minsize
return resized
def verify_crop(img, crop):
import skimage.util
assert img.shape[0] >= crop
assert img.shape[1] >= crop
y_offset = 0
if img.shape[0] > crop:
y_offset = (img.shape[0] - crop) // 2
x_offset = 0
if img.shape[1] > crop:
x_offset = (img.shape[1] - crop) // 2
y_bounds = (y_offset, img.shape[0] - crop - y_offset)
x_bounds = (x_offset, img.shape[1] - crop - x_offset)
c_bounds = (0, 0)
cropped = skimage.util.crop(img, (y_bounds, x_bounds, c_bounds))
assert cropped.shape[0] == crop
assert cropped.shape[1] == crop
return cropped
def verify_color_normalize(img, means, stds):
# Note the RGB/BGR inversion
# Operate on the 0-255 scale, like the C++ version
img = img * 255.0
img[:, :, 0] = (img[:, :, 0] - means[2]) / stds[2]
img[:, :, 1] = (img[:, :, 1] - means[1]) / stds[1]
img[:, :, 2] = (img[:, :, 2] - means[0]) / stds[0]
return img * (1.0 / 255.0)
# Convert an image to Caffe2's BGR, CHW, 0-255 layout (used to build the
# expected results and for debugging)
def caffe2_img(img):
# Convert RGB to BGR
img = img[:, :, (2, 1, 0)]
# Convert HWC to CHW
img = img.swapaxes(1, 2).swapaxes(0, 1)
img = img * 255.0
return img.astype(np.int32)
# Bounding box is ymin, xmin, height, width
def create_test(output_dir, width, height, default_bound, minsize, crop, means,
stds, count, label_type, num_labels, output1=None,
output2_size=None):
print("Creating a temporary lmdb database of %d pictures..." % (count))
if default_bound is None:
default_bound = [-1] * 4
LMDB_MAP_SIZE = 1 << 40
env = lmdb.open(output_dir, map_size=LMDB_MAP_SIZE, subdir=True)
index = 0
# Create images and the expected results
expected_results = []
with env.begin(write=True) as txn:
while index < count:
img_array = np.random.randint(
0, 256, [height, width, 3]).astype(np.uint8)
img_obj = Image.fromarray(img_array)
img_str = io.BytesIO()
img_obj.save(img_str, 'PNG')
# Create a random bounding box for every other image
# ymin, xmin, bound_height, bound_width
# TODO: To ensure that we never need to scale, we
# ensure that the bounding-box is larger than the
# minsize parameter
bounding_box = list(default_bound)
do_default_bound = True
if index % 2 == 0:
if height > minsize and width > minsize:
do_default_bound = False
bounding_box[0:2] = [np.random.randint(a) for a in
(height - minsize, width - minsize)]
bounding_box[2:4] = [np.random.randint(a) + minsize for a in
(height - bounding_box[0] - minsize + 1,
width - bounding_box[1] - minsize + 1)]
# print("Bounding box is %s" % (str(bounding_box)))
# Create expected result
img_expected = img_array.astype(np.float32) * (1.0 / 255.0)
# print("Orig image: %s" % (str(caffe2_img(img_expected))))
img_expected = verify_apply_bounding_box(
img_expected,
bounding_box)
# print("Bounded image: %s" % (str(caffe2_img(img_expected))))
img_expected = verify_rescale(img_expected, minsize)
img_expected = verify_crop(img_expected, crop)
# print("Crop image: %s" % (str(caffe2_img(img_expected))))
img_expected = verify_color_normalize(img_expected, means, stds)
# print("Color image: %s" % (str(caffe2_img(img_expected))))
tensor_protos = caffe2_pb2.TensorProtos()
image_tensor = tensor_protos.protos.add()
image_tensor.data_type = 4 # string data
image_tensor.string_data.append(img_str.getvalue())
img_str.close()
label_tensor = tensor_protos.protos.add()
label_tensor.data_type = 2 # int32 data
assert (label_type >= 0 and label_type <= 3)
if label_type == 0:
label_tensor.int32_data.append(index)
expected_label = index
elif label_type == 1:
binary_labels = np.random.randint(2, size=num_labels)
for idx, val in enumerate(binary_labels.tolist()):
if val == 1:
label_tensor.int32_data.append(idx)
expected_label = binary_labels
elif label_type == 2:
embedding_label = np.random.randint(100, size=num_labels)
for _idx, val in enumerate(embedding_label.tolist()):
label_tensor.int32_data.append(val)
expected_label = embedding_label
elif label_type == 3:
weight_tensor = tensor_protos.protos.add()
weight_tensor.data_type = 1 # float weights
binary_labels = np.random.randint(2, size=num_labels)
expected_label = np.zeros(num_labels).astype(np.float32)
for idx, val in enumerate(binary_labels.tolist()):
expected_label[idx] = val * idx
if val == 1:
label_tensor.int32_data.append(idx)
weight_tensor.float_data.append(idx)
if output1:
output1_tensor = tensor_protos.protos.add()
output1_tensor.data_type = 1 # float data
output1_tensor.float_data.append(output1)
output2 = []
if output2_size:
output2_tensor = tensor_protos.protos.add()
output2_tensor.data_type = 2 # int32 data
values = np.random.randint(1024, size=output2_size)
for val in values.tolist():
output2.append(val)
output2_tensor.int32_data.append(val)
expected_results.append(
[caffe2_img(img_expected), expected_label, output1, output2])
if not do_default_bound:
bounding_tensor = tensor_protos.protos.add()
bounding_tensor.data_type = 2 # int32 data
bounding_tensor.int32_data.extend(bounding_box)
txn.put(
'{}'.format(index).encode('ascii'),
tensor_protos.SerializeToString()
)
index = index + 1
# End while
# End with
return expected_results
def run_test(
size_tuple, means, stds, label_type, num_labels, is_test, scale_jitter_type,
color_jitter, color_lighting, dc, validator, output1=None, output2_size=None):
# TODO: Does not test on GPU and does not test use_gpu_transform
# WARNING: Using ModelHelper automatically does NHWC to NCHW
# transformation if needed.
width, height, minsize, crop = size_tuple
means = [float(m) for m in means]
stds = [float(s) for s in stds]
out_dir = tempfile.mkdtemp()
count_images = 2 # One with bounding box and one without
expected_images = create_test(
out_dir,
width=width,
height=height,
default_bound=(3, 5, height - 3, width - 5),
minsize=minsize,
crop=crop,
means=means,
stds=stds,
count=count_images,
label_type=label_type,
num_labels=num_labels,
output1=output1,
output2_size=output2_size
)
for device_option in dc:
with hu.temp_workspace():
reader_net = core.Net('reader')
reader_net.CreateDB(
[],
'DB',
db=out_dir,
db_type="lmdb"
)
workspace.RunNetOnce(reader_net)
outputs = ['data', 'label']
output_sizes = []
if output1:
outputs.append('output1')
output_sizes.append(1)
if output2_size:
outputs.append('output2')
output_sizes.append(output2_size)
imageop = core.CreateOperator(
'ImageInput',
['DB'],
outputs,
batch_size=count_images,
color=3,
minsize=minsize,
crop=crop,
is_test=is_test,
bounding_ymin=3,
bounding_xmin=5,
bounding_height=height - 3,
bounding_width=width - 5,
mean_per_channel=means,
std_per_channel=stds,
use_gpu_transform=(device_option.device_type == 1),
label_type=label_type,
num_labels=num_labels,
output_sizes=output_sizes,
scale_jitter_type=scale_jitter_type,
color_jitter=color_jitter,
color_lighting=color_lighting
)
imageop.device_option.CopyFrom(device_option)
main_net = core.Net('main')
main_net.Proto().op.extend([imageop])
workspace.RunNetOnce(main_net)
validator(expected_images, device_option, count_images)
# End for
# End with
# End for
shutil.rmtree(out_dir)
# end run_test
@unittest.skipIf('cv2' not in sys.modules, 'python-opencv is not installed')
@unittest.skipIf('lmdb' not in sys.modules, 'python-lmdb is not installed')
class TestImport(hu.HypothesisTestCase):
def validate_image_and_label(
self, expected_images, device_option, count_images, label_type,
is_test, scale_jitter_type, color_jitter, color_lighting):
l = workspace.FetchBlob('label')
result = workspace.FetchBlob('data').astype(np.int32)
# If we don't use_gpu_transform, the output is in NHWC
# Our reference output is CHW so we swap
if device_option.device_type != 1:
expected = [img.swapaxes(0, 1).swapaxes(1, 2) for
(img, _, _, _) in expected_images]
else:
expected = [img for (img, _, _, _) in expected_images]
for i in range(count_images):
if label_type == 0:
self.assertEqual(l[i], expected_images[i][1])
else:
self.assertEqual(
(l[i] - expected_images[i][1] > 0).sum(), 0)
if is_test == 0:
# when training data preparation is randomized (e.g. random cropping,
# Inception-style random sized cropping, color jittering,
# color lighting), we only compare blob shapes
for (s1, s2) in zip(expected[i].shape, result[i].shape):
self.assertEqual(s1, s2)
else:
self.assertEqual((expected[i] - result[i] > 1).sum(), 0)
# End for
# end validate_image_and_label
@given(size_tuple=st.tuples(
st.integers(min_value=8, max_value=4096),
st.integers(min_value=8, max_value=4096)).flatmap(lambda t: st.tuples(
st.just(t[0]), st.just(t[1]),
st.just(min(t[0] - 6, t[1] - 4)),
st.integers(min_value=1, max_value=min(t[0] - 6, t[1] - 4)))),
means=st.tuples(st.integers(min_value=0, max_value=255),
st.integers(min_value=0, max_value=255),
st.integers(min_value=0, max_value=255)),
stds=st.tuples(st.floats(min_value=1, max_value=10),
st.floats(min_value=1, max_value=10),
st.floats(min_value=1, max_value=10)),
label_type=st.integers(0, 3),
num_labels=st.integers(min_value=8, max_value=4096),
is_test=st.integers(min_value=0, max_value=1),
scale_jitter_type=st.integers(min_value=0, max_value=1),
color_jitter=st.integers(min_value=0, max_value=1),
color_lighting=st.integers(min_value=0, max_value=1),
**hu.gcs)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_imageinput(
self, size_tuple, means, stds, label_type,
num_labels, is_test, scale_jitter_type, color_jitter, color_lighting,
gc, dc):
def validator(expected_images, device_option, count_images):
self.validate_image_and_label(
expected_images, device_option, count_images, label_type,
is_test, scale_jitter_type, color_jitter, color_lighting)
# End validator
run_test(
size_tuple, means, stds, label_type, num_labels, is_test,
scale_jitter_type, color_jitter, color_lighting, dc, validator)
# End test_imageinput
@given(size_tuple=st.tuples(
st.integers(min_value=8, max_value=4096),
st.integers(min_value=8, max_value=4096)).flatmap(lambda t: st.tuples(
st.just(t[0]), st.just(t[1]),
st.just(min(t[0] - 6, t[1] - 4)),
st.integers(min_value=1, max_value=min(t[0] - 6, t[1] - 4)))),
means=st.tuples(st.integers(min_value=0, max_value=255),
st.integers(min_value=0, max_value=255),
st.integers(min_value=0, max_value=255)),
stds=st.tuples(st.floats(min_value=1, max_value=10),
st.floats(min_value=1, max_value=10),
st.floats(min_value=1, max_value=10)),
label_type=st.integers(0, 3),
num_labels=st.integers(min_value=8, max_value=4096),
is_test=st.integers(min_value=0, max_value=1),
scale_jitter_type=st.integers(min_value=0, max_value=1),
color_jitter=st.integers(min_value=0, max_value=1),
color_lighting=st.integers(min_value=0, max_value=1),
output1=st.floats(min_value=1, max_value=10),
output2_size=st.integers(min_value=2, max_value=10),
**hu.gcs)
@settings(verbosity=Verbosity.verbose, max_examples=10, deadline=None)
def test_imageinput_with_additional_outputs(
self, size_tuple, means, stds, label_type,
num_labels, is_test, scale_jitter_type, color_jitter, color_lighting,
output1, output2_size, gc, dc):
def validator(expected_images, device_option, count_images):
self.validate_image_and_label(
expected_images, device_option, count_images, label_type,
is_test, scale_jitter_type, color_jitter, color_lighting)
output1_result = workspace.FetchBlob('output1')
output2_result = workspace.FetchBlob('output2')
for i in range(count_images):
self.assertEqual(output1_result[i], expected_images[i][2])
self.assertEqual(
(output2_result[i] - expected_images[i][3] > 0).sum(), 0)
# End for
# End validator
run_test(
size_tuple, means, stds, label_type, num_labels, is_test,
scale_jitter_type, color_jitter, color_lighting, dc,
validator, output1, output2_size)
# End test_imageinput
if __name__ == '__main__':
import unittest
unittest.main()
# === end of file: caffe2/python/operator_test/image_input_op_test.py (pytorch-master) ===
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase, rand_array
class TestPartitionOps(TestCase):
def test_configs(self):
# (main dims, partitions, main type, [list of (extra dims, type)])
configs = [
((10, ), 3),
((4, ), 10),
((10, 10), 4),
((100, ), 2),
((5, ), 1),
((1, ), 1),
((2, 10), 2),
]
suffixes = [
[],
[((2, 2), np.float32)],
[((3, ), np.int64), ((2, ), np.float32)],
]
return [
(main_dims, parts, main_type, extra, pack)
for main_dims, parts in configs
for main_type in [np.int32, np.int64] for extra in suffixes
for pack in [False, True]
]
def testPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
ins = ['in' + str(i) for i in range(1 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(1 + len(extra_ins))
]
op = core.CreateOperator(
'Partition', ins, outs, pack_first_input=(1 if pack else 0))
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i], d)
x.append(d)
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
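# Illustrative example (values invented for clarity): Partition with parts = 3 and
# main input x = [3, 4, 5, 6] computes shard ids x % 3 = [0, 1, 2, 0], so the three
# partitions of x are [3, 6], [4] and [5]; the extra inputs are split row-wise by
# the same shard ids.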
workspace.RunOperatorOnce(op)
ref = sharding(x)
print(x)
print(ref)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
# test inverse operation (GatherByKey)
if len(main_dims) == 1:
# currently only 1D key tensor supported
for i in range(len(extra_ins)):
expected_out = ins[i + 1]
gather_ins = [ins[0]] + [
outs[len(ins) * p + i + 1] for p in range(parts)]
actual_out = expected_out + '_actual'
op = core.CreateOperator(
'GatherByKey', gather_ins, actual_out)
workspace.RunOperatorOnce(op)
expected = workspace.FetchBlob(expected_out)
actual = workspace.FetchBlob(actual_out)
np.testing.assert_array_equal(expected, actual)
def testLengthsPartition(self):
for main_dims, parts, main_type, extra_ins, pack in self.test_configs():
# For LengthsPartition, only 1-D tensors are supported as the first input
if len(main_dims) > 1:
continue
ins = ['in' + str(i) for i in range(2 + len(extra_ins))]
outs = [
'in{}_p{}'.format(j, i)
for i in range(parts) for j in range(2 + len(extra_ins))
]
op = core.CreateOperator(
'LengthsPartition', ins, outs,
pack_first_input=(1 if pack else 0)
)
x = []
for i, (dims, t) in enumerate([((), main_type)] + extra_ins):
if t in [np.float32, np.float64]:
d = rand_array(*(main_dims + dims))
else:
d = np.random.randint(-100, 100, (main_dims + dims))
d = d.astype(t)
workspace.FeedBlob(ins[i + 1], d)
x.append(d)
# Randomly generate length tensor as well
elements = np.random.randint(2, 10)
lengths = []
total_length = 0
for _ in range(elements - 1):
lengths.append(np.random.randint(main_dims[0] - total_length))
total_length += lengths[-1]
lengths.append(main_dims[0] - total_length)
workspace.FeedBlob(ins[0], np.array(lengths, dtype=np.int32))
def sharding(x):
# numpy has proper modulo op that yields non-negative results
shards = (x[0] % parts).reshape([-1])
out = []
for i in range(parts):
idx = 0
sharded_lengths = np.zeros(elements)
for ind, length in enumerate(lengths):
for _ in range(length):
if shards[idx] == i:
sharded_lengths[ind] += 1
idx += 1
out.append(sharded_lengths)
for ind, v in enumerate(x):
suffix_shape = v.shape[len(x[0].shape):]
accum = []
data = v.reshape((-1, ) + suffix_shape)
if pack and ind == 0:
data = data // parts
for j, s in enumerate(shards):
if s == i:
accum.append(data[j])
def join(a):
if not a:
return np.empty(shape=(0, ) + suffix_shape)
return np.stack(a)
out.append(join(accum))
return out
workspace.RunOperatorOnce(op)
ref = sharding(x)
for name, expected in zip(outs, ref):
np.testing.assert_array_equal(
expected, workspace.FetchBlob(name)
)
if __name__ == "__main__":
import unittest
unittest.main()
# === end of file: caffe2/python/operator_test/partition_ops_test.py (pytorch-master) ===
from caffe2.python import core
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
# Reference implementation from detectron/lib/utils/boxes.py
def bbox_transform(boxes, deltas, weights=(1.0, 1.0, 1.0, 1.0)):
"""Forward transform that maps proposal boxes to predicted ground-truth
boxes using bounding-box regression deltas. See bbox_transform_inv for a
description of the weights argument.
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
widths = boxes[:, 2] - boxes[:, 0] + 1.0
heights = boxes[:, 3] - boxes[:, 1] + 1.0
ctr_x = boxes[:, 0] + 0.5 * widths
ctr_y = boxes[:, 1] + 0.5 * heights
wx, wy, ww, wh = weights
dx = deltas[:, 0::4] / wx
dy = deltas[:, 1::4] / wy
dw = deltas[:, 2::4] / ww
dh = deltas[:, 3::4] / wh
# Prevent sending too large values into np.exp()
BBOX_XFORM_CLIP = np.log(1000. / 16.)
dw = np.minimum(dw, BBOX_XFORM_CLIP)
dh = np.minimum(dh, BBOX_XFORM_CLIP)
pred_ctr_x = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_ctr_y = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_w = np.exp(dw) * widths[:, np.newaxis]
pred_h = np.exp(dh) * heights[:, np.newaxis]
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
# x1
pred_boxes[:, 0::4] = pred_ctr_x - 0.5 * pred_w
# y1
pred_boxes[:, 1::4] = pred_ctr_y - 0.5 * pred_h
# x2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 2::4] = pred_ctr_x + 0.5 * pred_w - 1
# y2 (note: "- 1" is correct; don't be fooled by the asymmetry)
pred_boxes[:, 3::4] = pred_ctr_y + 0.5 * pred_h - 1
return pred_boxes
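# Small illustrative check (not part of the original file; helper name hypothetical):
# with zero deltas and unit weights, bbox_transform returns the input box unchanged,
# because the "+ 1" in the width/height convention and the "- 1" in x2/y2 cancel.
def _bbox_transform_identity_sketch():
    boxes = np.array([[10., 20., 30., 60.]], dtype=np.float32)
    deltas = np.zeros((1, 4), dtype=np.float32)
    return bbox_transform(boxes, deltas)  # -> [[10., 20., 30., 60.]]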
# Reference implementation from detectron/lib/utils/boxes.py
def clip_tiled_boxes(boxes, im_shape):
"""Clip boxes to image boundaries. im_shape is [height, width] and boxes
has shape (N, 4 * num_tiled_boxes)."""
assert (
boxes.shape[1] % 4 == 0
), "boxes.shape[1] is {:d}, but must be divisible by 4.".format(
boxes.shape[1]
)
# x1 >= 0
boxes[:, 0::4] = np.maximum(np.minimum(boxes[:, 0::4], im_shape[1] - 1), 0)
# y1 >= 0
boxes[:, 1::4] = np.maximum(np.minimum(boxes[:, 1::4], im_shape[0] - 1), 0)
# x2 < im_shape[1]
boxes[:, 2::4] = np.maximum(np.minimum(boxes[:, 2::4], im_shape[1] - 1), 0)
# y2 < im_shape[0]
boxes[:, 3::4] = np.maximum(np.minimum(boxes[:, 3::4], im_shape[0] - 1), 0)
return boxes
def generate_rois(roi_counts, im_dims):
assert len(roi_counts) == len(im_dims)
all_rois = []
for i, num_rois in enumerate(roi_counts):
if num_rois == 0:
continue
# [batch_idx, x1, y1, x2, y2]
rois = np.random.uniform(0, im_dims[i], size=(roi_counts[i], 5)).astype(
np.float32
)
rois[:, 0] = i # batch_idx
# Swap (x1, x2) if x1 > x2
rois[:, 1], rois[:, 3] = (
np.minimum(rois[:, 1], rois[:, 3]),
np.maximum(rois[:, 1], rois[:, 3]),
)
# Swap (y1, y2) if y1 > y2
rois[:, 2], rois[:, 4] = (
np.minimum(rois[:, 2], rois[:, 4]),
np.maximum(rois[:, 2], rois[:, 4]),
)
all_rois.append(rois)
if len(all_rois) > 0:
return np.vstack(all_rois)
return np.empty((0, 5)).astype(np.float32)
def bbox_transform_rotated(
boxes,
deltas,
weights=(1.0, 1.0, 1.0, 1.0),
angle_bound_on=True,
angle_bound_lo=-90,
angle_bound_hi=90,
):
"""
Similar to bbox_transform but for rotated boxes with angle info.
"""
if boxes.shape[0] == 0:
return np.zeros((0, deltas.shape[1]), dtype=deltas.dtype)
boxes = boxes.astype(deltas.dtype, copy=False)
ctr_x = boxes[:, 0]
ctr_y = boxes[:, 1]
widths = boxes[:, 2]
heights = boxes[:, 3]
angles = boxes[:, 4]
wx, wy, ww, wh = weights
dx = deltas[:, 0::5] / wx
dy = deltas[:, 1::5] / wy
dw = deltas[:, 2::5] / ww
dh = deltas[:, 3::5] / wh
da = deltas[:, 4::5] * 180.0 / np.pi
# Prevent sending too large values into np.exp()
BBOX_XFORM_CLIP = np.log(1000. / 16.)
dw = np.minimum(dw, BBOX_XFORM_CLIP)
dh = np.minimum(dh, BBOX_XFORM_CLIP)
pred_boxes = np.zeros(deltas.shape, dtype=deltas.dtype)
pred_boxes[:, 0::5] = dx * widths[:, np.newaxis] + ctr_x[:, np.newaxis]
pred_boxes[:, 1::5] = dy * heights[:, np.newaxis] + ctr_y[:, np.newaxis]
pred_boxes[:, 2::5] = np.exp(dw) * widths[:, np.newaxis]
pred_boxes[:, 3::5] = np.exp(dh) * heights[:, np.newaxis]
pred_angle = da + angles[:, np.newaxis]
if angle_bound_on:
period = angle_bound_hi - angle_bound_lo
assert period % 180 == 0
pred_angle[np.where(pred_angle < angle_bound_lo)] += period
pred_angle[np.where(pred_angle > angle_bound_hi)] -= period
pred_boxes[:, 4::5] = pred_angle
return pred_boxes
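# Illustrative example (values invented): with angle_bound_lo = -90 and
# angle_bound_hi = 90 the period is 180 degrees, so a predicted angle of 100
# wraps to -80 and an angle of -95 wraps to 85.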
def clip_tiled_boxes_rotated(boxes, im_shape, angle_thresh=1.0):
"""
Similar to clip_tiled_boxes but for rotated boxes with angle info.
Only clips almost horizontal boxes within angle_thresh. The rest are
left unchanged.
"""
assert (
boxes.shape[1] % 5 == 0
), "boxes.shape[1] is {:d}, but must be divisible by 5.".format(
boxes.shape[1]
)
(H, W) = im_shape[:2]
# Filter boxes that are almost upright within angle_thresh tolerance
idx = np.where(np.abs(boxes[:, 4::5]) <= angle_thresh)
idx5 = idx[1] * 5
# convert to (x1, y1, x2, y2)
x1 = boxes[idx[0], idx5] - (boxes[idx[0], idx5 + 2] - 1) / 2.0
y1 = boxes[idx[0], idx5 + 1] - (boxes[idx[0], idx5 + 3] - 1) / 2.0
x2 = boxes[idx[0], idx5] + (boxes[idx[0], idx5 + 2] - 1) / 2.0
y2 = boxes[idx[0], idx5 + 1] + (boxes[idx[0], idx5 + 3] - 1) / 2.0
# clip
x1 = np.maximum(np.minimum(x1, W - 1), 0)
y1 = np.maximum(np.minimum(y1, H - 1), 0)
x2 = np.maximum(np.minimum(x2, W - 1), 0)
y2 = np.maximum(np.minimum(y2, H - 1), 0)
# convert back to (xc, yc, w, h)
boxes[idx[0], idx5] = (x1 + x2) / 2.0
boxes[idx[0], idx5 + 1] = (y1 + y2) / 2.0
boxes[idx[0], idx5 + 2] = x2 - x1 + 1
boxes[idx[0], idx5 + 3] = y2 - y1 + 1
return boxes
def generate_rois_rotated(roi_counts, im_dims):
rois = generate_rois(roi_counts, im_dims)
# [batch_id, ctr_x, ctr_y, w, h, angle]
rotated_rois = np.empty((rois.shape[0], 6)).astype(np.float32)
rotated_rois[:, 0] = rois[:, 0] # batch_id
rotated_rois[:, 1] = (rois[:, 1] + rois[:, 3]) / 2. # ctr_x = (x1 + x2) / 2
rotated_rois[:, 2] = (rois[:, 2] + rois[:, 4]) / 2. # ctr_y = (y1 + y2) / 2
rotated_rois[:, 3] = rois[:, 3] - rois[:, 1] + 1.0 # w = x2 - x1 + 1
rotated_rois[:, 4] = rois[:, 4] - rois[:, 2] + 1.0 # h = y2 - y1 + 1
rotated_rois[:, 5] = np.random.uniform(-90.0, 90.0) # angle in degrees
return rotated_rois
class TestBBoxTransformOp(serial.SerializedTestCase):
@given(
num_rois=st.integers(1, 10),
num_classes=st.integers(1, 10),
im_dim=st.integers(100, 600),
skip_batch_id=st.booleans(),
rotated=st.booleans(),
angle_bound_on=st.booleans(),
clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
**hu.gcs_cpu_only
)
@settings(deadline=10000)
def test_bbox_transform(
self,
num_rois,
num_classes,
im_dim,
skip_batch_id,
rotated,
angle_bound_on,
clip_angle_thresh,
gc,
dc,
):
"""
Test with all rois belonging to a single image per run.
"""
rois = (
generate_rois_rotated([num_rois], [im_dim])
if rotated
else generate_rois([num_rois], [im_dim])
)
box_dim = 5 if rotated else 4
if skip_batch_id:
rois = rois[:, 1:]
deltas = np.random.randn(num_rois, box_dim * num_classes).astype(np.float32)
im_info = np.array([im_dim, im_dim, 1.0]).astype(np.float32).reshape(1, 3)
def bbox_transform_ref(rois, deltas, im_info):
boxes = rois if rois.shape[1] == box_dim else rois[:, 1:]
im_shape = im_info[0, 0:2]
if rotated:
box_out = bbox_transform_rotated(
boxes, deltas, angle_bound_on=angle_bound_on
)
box_out = clip_tiled_boxes_rotated(
box_out, im_shape, angle_thresh=clip_angle_thresh
)
else:
box_out = bbox_transform(boxes, deltas)
box_out = clip_tiled_boxes(box_out, im_shape)
return [box_out]
op = core.CreateOperator(
"BBoxTransform",
["rois", "deltas", "im_info"],
["box_out"],
apply_scale=False,
correct_transform_coords=True,
rotated=rotated,
angle_bound_on=angle_bound_on,
clip_angle_thresh=clip_angle_thresh,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[rois, deltas, im_info],
reference=bbox_transform_ref,
)
@given(
roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),
num_classes=st.integers(1, 10),
rotated=st.booleans(),
angle_bound_on=st.booleans(),
clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
**hu.gcs_cpu_only
)
@settings(deadline=10000)
def test_bbox_transform_batch(
self,
roi_counts,
num_classes,
rotated,
angle_bound_on,
clip_angle_thresh,
gc,
dc,
):
"""
Test with rois for multiple images in a batch
"""
batch_size = len(roi_counts)
total_rois = sum(roi_counts)
im_dims = np.random.randint(100, 600, batch_size)
rois = (
generate_rois_rotated(roi_counts, im_dims)
if rotated
else generate_rois(roi_counts, im_dims)
)
box_dim = 5 if rotated else 4
deltas = np.random.randn(total_rois, box_dim * num_classes).astype(np.float32)
im_info = np.zeros((batch_size, 3)).astype(np.float32)
im_info[:, 0] = im_dims
im_info[:, 1] = im_dims
im_info[:, 2] = 1.0
def bbox_transform_ref(rois, deltas, im_info):
box_out = []
offset = 0
for i, num_rois in enumerate(roi_counts):
if num_rois == 0:
continue
cur_boxes = rois[offset : offset + num_rois, 1:]
cur_deltas = deltas[offset : offset + num_rois]
im_shape = im_info[i, 0:2]
if rotated:
cur_box_out = bbox_transform_rotated(
cur_boxes, cur_deltas, angle_bound_on=angle_bound_on
)
cur_box_out = clip_tiled_boxes_rotated(
cur_box_out, im_shape, angle_thresh=clip_angle_thresh
)
else:
cur_box_out = bbox_transform(cur_boxes, cur_deltas)
cur_box_out = clip_tiled_boxes(cur_box_out, im_shape)
box_out.append(cur_box_out)
offset += num_rois
if len(box_out) > 0:
box_out = np.vstack(box_out)
else:
box_out = np.empty(deltas.shape).astype(np.float32)
return [box_out, roi_counts]
op = core.CreateOperator(
"BBoxTransform",
["rois", "deltas", "im_info"],
["box_out", "roi_batch_splits"],
apply_scale=False,
correct_transform_coords=True,
rotated=rotated,
angle_bound_on=angle_bound_on,
clip_angle_thresh=clip_angle_thresh,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[rois, deltas, im_info],
reference=bbox_transform_ref,
)
# === end of file: caffe2/python/operator_test/bbox_transform_test.py (pytorch-master) ===
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import hypothesis.strategies as st
class LpnormTest(hu.HypothesisTestCase):
def _test_Lp_Norm(self, inputs, gc, dc):
X = inputs[0]
# avoid kinks by moving away from 0
X += 0.02 * np.sign(X)
X[X == 0.0] += 0.02
self.ws.create_blob("X").feed(X)
op = core.CreateOperator(
'LpNorm',
['X'],
['l1_norm'],
p=1,
)
self.ws.run(op)
np.testing.assert_allclose(self.ws.blobs[("l1_norm")].fetch(),
np.linalg.norm((X).flatten(), ord=1),
rtol=1e-4, atol=1e-4)
self.assertDeviceChecks(dc, op, [X], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X], 0, [0], stepsize=1e-2, threshold=1e-2)
op = core.CreateOperator(
'LpNorm',
['X'],
['l2_norm'],
p=2,
)
self.ws.run(op)
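# Note: the reference below compares against the *squared* L2 norm (the sum of
# squares), which is what this LpNorm op with p=2 is expected to return here.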
np.testing.assert_allclose(
self.ws.blobs[("l2_norm")].fetch(),
np.linalg.norm((X).flatten(), ord=2)**2,
rtol=1e-4,
atol=1e-4
)
self.assertDeviceChecks(dc, op, [X], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X], 0, [0], stepsize=1e-2, threshold=1e-2)
op = core.CreateOperator(
'LpNorm',
['X'],
['l2_averaged_norm'],
p=2,
average=True
)
self.ws.run(op)
np.testing.assert_allclose(
self.ws.blobs[("l2_averaged_norm")].fetch(),
np.linalg.norm((X).flatten(), ord=2)**2 / X.size,
rtol=1e-4,
atol=1e-4
)
@given(inputs=hu.tensors(n=1,
min_dim=1,
max_dim=3,
dtype=np.float32),
**hu.gcs)
@settings(deadline=10000)
def test_Lp_Norm(self, inputs, gc, dc):
self._test_Lp_Norm(inputs, gc, dc)
def test_Lp_Norm_empty(self):
self._test_Lp_Norm([np.array([], dtype=np.float32)], hu.cpu_do, [hu.cpu_do])
self.assertEqual(self.ws.blobs["l1_norm"].fetch()[0], 0.0)
self.assertEqual(self.ws.blobs["l2_norm"].fetch()[0], 0.0)
self.assertTrue(np.isnan(self.ws.blobs["l2_averaged_norm"].fetch()[0]))
@given(x=hu.tensor(
min_dim=1, max_dim=10, dtype=np.float32,
elements=st.integers(min_value=-100, max_value=100)),
p=st.integers(1, 2),
average=st.integers(0, 1)
)
def test_lpnorm_shape_inference(self, x, p, average):
workspace.FeedBlob('x', x)
net = core.Net("lpnorm_test")
result = net.LpNorm(['x'], p=p, average=bool(average))
(shapes, types) = workspace.InferShapesAndTypes([net])
workspace.RunNetOnce(net)
self.assertEqual(shapes[result], list(workspace.blobs[result].shape))
self.assertEqual(types[result], core.DataType.FLOAT)
# === end of file: caffe2/python/operator_test/lpnorm_op_test.py (pytorch-master) ===
import numpy as np
import torch
import sys
import unittest
from scipy import interpolate
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, utils
from caffe2.proto import caffe2_pb2
import caffe2.python.operator_test.detectron_keypoints as keypoint_utils
NUM_TEST_ROI = 14
NUM_KEYPOINTS = 19
HEATMAP_SIZE = 56
def heatmap_FAIR_keypoint_ref(maps, rois):
return [keypoint_utils.heatmaps_to_keypoints(maps, rois)]
def heatmap_approx_keypoint_ref(maps, rois):
return [keypoint_utils.approx_heatmap_keypoint(maps, rois)]
def c10_op_ref(maps, rois):
keypoints = torch.ops._caffe2.HeatmapMaxKeypoint(
torch.tensor(maps),
torch.tensor(rois),
should_output_softmax=True,
)
return [keypoints.numpy()]
class TestHeatmapMaxKeypointOp(hu.HypothesisTestCase):
def setUp(self):
super(TestHeatmapMaxKeypointOp, self).setUp()
np.random.seed(0)
# start from small random heatmaps and interpolate them up to HEATMAP_SIZE
HEATMAP_SMALL_SIZE = 4
bboxes_in = 500 * np.random.rand(NUM_TEST_ROI, 4).astype(np.float32)
# ensure x1 <= x2 and y1 <= y2 for every bbox
for i in range(NUM_TEST_ROI):
if bboxes_in[i][0] > bboxes_in[i][2]:
tmp = bboxes_in[i][2]
bboxes_in[i][2] = bboxes_in[i][0]
bboxes_in[i][0] = tmp
if bboxes_in[i][1] > bboxes_in[i][3]:
tmp = bboxes_in[i][3]
bboxes_in[i][3] = bboxes_in[i][1]
bboxes_in[i][1] = tmp
# initialize randomized low-resolution heatmaps and expand them with interpolation
init = np.random.rand(
NUM_TEST_ROI,
NUM_KEYPOINTS,
HEATMAP_SMALL_SIZE,
HEATMAP_SMALL_SIZE).astype(np.float32)
heatmaps_in = np.zeros(
(NUM_TEST_ROI, NUM_KEYPOINTS, HEATMAP_SIZE, HEATMAP_SIZE)
).astype(np.float32)
for roi in range(NUM_TEST_ROI):
for keyp in range(NUM_KEYPOINTS):
f = interpolate.interp2d(
np.arange(0, 1, 1.0 / HEATMAP_SMALL_SIZE),
np.arange(0, 1, 1.0 / HEATMAP_SMALL_SIZE),
init[roi][keyp],
kind='cubic')
heatmaps_in[roi][keyp] = f(
np.arange(0, 1, 1.0 / HEATMAP_SIZE),
np.arange(0, 1, 1.0 / HEATMAP_SIZE))
self.heatmaps_in = heatmaps_in
self.bboxes_in = bboxes_in
self.op = core.CreateOperator(
'HeatmapMaxKeypoint',
['heatmaps_in', 'bboxes_in'],
['keypoints_out'],
arg=[
utils.MakeArgument("should_output_softmax", True),
],
device_option=caffe2_pb2.DeviceOption())
@unittest.skipIf('cv2' not in sys.modules, 'python-opencv is not installed')
def test_close_to_FAIR(self):
# allow up to 10 pixels of error at the scale of a 500px bbox
self.assertReferenceChecks(
device_option=caffe2_pb2.DeviceOption(),
op=self.op,
inputs=[self.heatmaps_in, self.bboxes_in],
reference=heatmap_FAIR_keypoint_ref,
threshold=10,
)
def test_approx_heatmap_keypoint(self):
# The C++ and Python implementations should be bit-wise equal
self.assertReferenceChecks(
device_option=caffe2_pb2.DeviceOption(),
op=self.op,
inputs=[self.heatmaps_in, self.bboxes_in],
reference=heatmap_approx_keypoint_ref,
)
def test_special_cases(self):
example_bboxes = np.array([[0, 0, 100, 100]]).astype(np.float32)
heatmap_tests = []
# special case #1
heatmap_tests.append(np.array([
[0.14722, 0.807823, 0.447052],
[0.652919, 0.850923, -0.225462],
[0.805912, 0.75778, -0.563371],
]).astype(np.float32).reshape((1, 1, 3, 3)))
# special case #2
heatmap_tests.append(np.array([
[3.19541, 3.69551, 3.87579],
[3.63094, 3.89978, 3.67606],
[3.78555, 3.87291, 3.28083],
]).astype(np.float32).reshape((1, 1, 3, 3)))
for heatmap_test in heatmap_tests:
self.assertReferenceChecks(
device_option=caffe2_pb2.DeviceOption(),
op=self.op,
inputs=[heatmap_test, example_bboxes],
reference=heatmap_approx_keypoint_ref,
)
def test_caffe2_pytorch_eq(self):
self.assertReferenceChecks(
device_option=caffe2_pb2.DeviceOption(),
op=self.op,
inputs=[self.heatmaps_in, self.bboxes_in],
reference=c10_op_ref,
)
if __name__ == "__main__":
unittest.main()
# === end of file: caffe2/python/operator_test/heatmap_max_keypoint_op_test.py (pytorch-master) ===
import hypothesis
from hypothesis import given, settings, HealthCheck
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestSparseLpNorm(hu.HypothesisTestCase):
@staticmethod
def ref_lpnorm(param_in, p, reg_lambda):
"""Reference function that should be matched by the Caffe2 operator."""
if p == 2.0:
return param_in * (1 - reg_lambda)
if p == 1.0:
reg_term = np.ones_like(param_in) * reg_lambda * np.sign(param_in)
param_out = param_in - reg_term
param_out[np.abs(param_in) <= reg_lambda] = 0.
return param_out
raise ValueError
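# Worked example (illustrative values): with reg_lambda = 0.1, p = 2 shrinks every
# entry by the factor 0.9, while p = 1 soft-thresholds: 0.5 -> 0.4, -0.5 -> -0.4,
# and any entry with |value| <= 0.1 becomes exactly 0.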
# Suppress filter_too_much health check.
# Likely caused by `assume` call falling through too often.
@settings(suppress_health_check=[HealthCheck.filter_too_much])
@given(inputs=hu.tensors(n=1, min_dim=2, max_dim=2),
p=st.integers(min_value=1, max_value=2),
reg_lambda=st.floats(min_value=1e-4, max_value=1e-1),
data_strategy=st.data(),
**hu.gcs_cpu_only)
def test_sparse_lpnorm(self, inputs, p, reg_lambda, data_strategy, gc, dc):
param, = inputs
param += 0.02 * np.sign(param)
param[param == 0.0] += 0.02
# Create an indexing array containing values that are lists of indices,
# which index into param
indices = data_strategy.draw(
hu.tensor(dtype=np.int64, min_dim=1, max_dim=1,
elements=st.sampled_from(np.arange(param.shape[0]))),
)
hypothesis.note('indices.shape: %s' % str(indices.shape))
# For now, the indices must be unique
hypothesis.assume(np.array_equal(np.unique(indices.flatten()),
np.sort(indices.flatten())))
op = core.CreateOperator(
"SparseLpRegularizer",
["param", "indices"],
["param"],
p=float(p),
reg_lambda=reg_lambda,
)
def ref_sparse_lp_regularizer(param, indices, grad=None):
param_out = np.copy(param)
for _, index in enumerate(indices):
param_out[index] = self.ref_lpnorm(
param[index],
p=p,
reg_lambda=reg_lambda,
)
return (param_out,)
self.assertReferenceChecks(
gc, op, [param, indices],
ref_sparse_lp_regularizer
)
# === end of file: caffe2/python/operator_test/sparse_lp_regularizer_test.py (pytorch-master) ===
import numpy as np
from hypothesis import given, assume
import hypothesis.strategies as st
from caffe2.python import core, model_helper, utils
import caffe2.python.hypothesis_test_util as hu
class TestLeakyRelu(hu.HypothesisTestCase):
def _get_inputs(self, N, C, H, W, order):
input_data = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
        # The gradient checker's default step size is 0.05, so push values away
        # from the kink at 0 to keep numerical gradients well-defined.
input_data[np.logical_and(
input_data >= 0, input_data <= 0.051)] = 0.051
input_data[np.logical_and(
input_data <= 0, input_data >= -0.051)] = -0.051
if order == 'NHWC':
input_data = utils.NCHW2NHWC(input_data)
return input_data,
def _get_op(self, device_option, alpha, order, inplace=False):
outputs = ['output' if not inplace else "input"]
op = core.CreateOperator(
'LeakyRelu',
['input'],
outputs,
alpha=alpha,
device_option=device_option)
return op
def _feed_inputs(self, input_blobs, device_option):
names = ['input', 'scale', 'bias']
for name, blob in zip(names, input_blobs):
self.ws.create_blob(name).feed(blob, device_option=device_option)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 3),
C=st.integers(2, 3),
H=st.integers(2, 3),
W=st.integers(2, 3),
alpha=st.floats(0, 1),
order=st.sampled_from(['NCHW', 'NHWC']),
seed=st.integers(0, 1000))
def test_leaky_relu_gradients(self, gc, dc, N, C, H, W, order, alpha, seed):
np.random.seed(seed)
op = self._get_op(
device_option=gc,
alpha=alpha,
order=order)
input_blobs = self._get_inputs(N, C, H, W, order)
self.assertDeviceChecks(dc, op, input_blobs, [0])
self.assertGradientChecks(gc, op, input_blobs, 0, [0])
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000))
def test_leaky_relu_layout(self, gc, dc, N, C, H, W, alpha, seed):
outputs = {}
for order in ('NCHW', 'NHWC'):
np.random.seed(seed)
input_blobs = self._get_inputs(N, C, H, W, order)
self._feed_inputs(input_blobs, device_option=gc)
op = self._get_op(
device_option=gc,
alpha=alpha,
order=order)
self.ws.run(op)
outputs[order] = self.ws.blobs['output'].fetch()
np.testing.assert_allclose(
outputs['NCHW'],
utils.NHWC2NCHW(outputs["NHWC"]),
atol=1e-4,
rtol=1e-4)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000),
inplace=st.booleans())
def test_leaky_relu_reference_check(self, gc, dc, N, C, H, W, order, alpha,
seed, inplace):
np.random.seed(seed)
if order != "NCHW":
assume(not inplace)
inputs = self._get_inputs(N, C, H, W, order)
op = self._get_op(
device_option=gc,
alpha=alpha,
order=order,
inplace=inplace)
def ref(input_blob):
result = input_blob.copy()
result[result < 0] *= alpha
return result,
self.assertReferenceChecks(gc, op, inputs, ref)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000))
def test_leaky_relu_device_check(self, gc, dc, N, C, H, W, order, alpha,
seed):
np.random.seed(seed)
inputs = self._get_inputs(N, C, H, W, order)
op = self._get_op(
device_option=gc,
alpha=alpha,
order=order)
self.assertDeviceChecks(dc, op, inputs, [0])
@given(N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
alpha=st.floats(0, 1),
seed=st.integers(0, 1000))
def test_leaky_relu_model_helper_helper(self, N, C, H, W, order, alpha, seed):
np.random.seed(seed)
arg_scope = {'order': order}
model = model_helper.ModelHelper(name="test_model", arg_scope=arg_scope)
model.LeakyRelu(
'input',
'output',
alpha=alpha)
input_blob = np.random.rand(N, C, H, W).astype(np.float32)
if order == 'NHWC':
input_blob = utils.NCHW2NHWC(input_blob)
self.ws.create_blob('input').feed(input_blob)
self.ws.create_net(model.param_init_net).run()
self.ws.create_net(model.net).run()
output_blob = self.ws.blobs['output'].fetch()
if order == 'NHWC':
output_blob = utils.NHWC2NCHW(output_blob)
assert output_blob.shape == (N, C, H, W)
if __name__ == '__main__':
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/leaky_relu_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestChannelStatsOp(serial.SerializedTestCase):
def channel_stats_nchw_ref(self, X):
dims = X.shape
N = dims[0]
C = dims[1]
X = X.reshape(N, C, -1)
sum1 = np.sum(X, axis=(0, 2), keepdims=False)
sum2 = np.sum(X**2, axis=(0, 2), keepdims=False)
return (sum1, sum2)
def channel_stats_nhwc_ref(self, X):
dims = X.shape
N = dims[0]
C = dims[-1]
X = X.reshape(N, -1, C)
sum1 = np.sum(X, axis=(0, 1), keepdims=False)
sum2 = np.sum(X**2, axis=(0, 1), keepdims=False)
return (sum1, sum2)
@given(
N=st.integers(1, 5), C=st.integers(1, 10), H=st.integers(1, 12),
W=st.integers(1, 12), order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
@settings(deadline=10000)
def test_channel_stats_2d(self, N, C, H, W, order, gc, dc):
op = core.CreateOperator(
"ChannelStats",
["X"],
["sum", "sumsq"],
order=order,
)
def ref_op(X):
if order == "NCHW":
return self.channel_stats_nchw_ref(X)
else:
return self.channel_stats_nhwc_ref(X)
X = np.random.randn(N, C, H, W).astype(np.float32)
if order == "NHWC":
X = np.transpose(X, [0, 2, 3, 1])
self.assertReferenceChecks(gc, op, [X], reference=ref_op)
self.assertDeviceChecks(dc, op, [X], [0, 1])
@given(
N=st.integers(1, 5), C=st.integers(1, 10), D=st.integers(1, 6),
H=st.integers(1, 6), W=st.integers(1, 6),
order=st.sampled_from(["NCHW", "NHWC"]), **hu.gcs)
@settings(deadline=10000)
def test_channel_stats_3d(self, N, C, D, H, W, order, gc, dc):
op = core.CreateOperator(
"ChannelStats",
["X"],
["sum", "sumsq"],
order=order,
)
def ref_op(X):
if order == "NCHW":
return self.channel_stats_nchw_ref(X)
else:
return self.channel_stats_nhwc_ref(X)
X = np.random.randn(N, C, D, H, W).astype(np.float32)
if order == "NHWC":
X = np.transpose(X, [0, 2, 3, 4, 1])
self.assertReferenceChecks(gc, op, [X], reference=ref_op)
self.assertDeviceChecks(dc, op, [X], [0, 1])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/channel_stats_op_test.py
|
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestCosineEmbeddingCriterion(serial.SerializedTestCase):
@serial.given(N=st.integers(min_value=10, max_value=20),
seed=st.integers(min_value=0, max_value=65535),
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
def test_cosine_embedding_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
S = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
"CosineEmbeddingCriterion", ["S", "Y"], ["output"],
margin=margin)
def ref_cec(S, Y):
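            # Positive pairs (Y == 1) are penalized by 1 - S; negative pairs
            # (Y == -1) are penalized only when S exceeds the margin.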
result = (1 - S) * (Y == 1) + np.maximum(S - margin, 0) * (Y == -1)
return (result, )
# This checks the op implementation against a reference function in
# python.
self.assertReferenceChecks(gc, op, [S, Y], ref_cec)
# This checks the op implementation over multiple device options (e.g.
# CPU and CUDA). [0] means that the 0-th output is checked.
self.assertDeviceChecks(dc, op, [S, Y], [0])
# Now, since this operator's output has a "kink" around the margin
# value, we move the S vector away from the margin a little bit. This
# is a standard trick to avoid gradient check to fail on subgradient
# points.
S[np.abs(S - margin) < 0.1] += 0.2
        # This checks the operator's gradient. The first 0 means that we are
        # checking the gradient of the first input (S), and the second [0] means
        # that the gradient check should start from the 0-th output.
self.assertGradientChecks(gc, op, [S, Y], 0, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/cosine_embedding_criterion_op_test.py
|
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def calculate_ap(predictions, labels):
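    # Per-class average precision: rank by descending score and average the
    # precision values at the positions of the true positives.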
N, D = predictions.shape
ap = np.zeros(D)
num_range = np.arange((N), dtype=np.float32) + 1
for k in range(D):
scores = predictions[:N, k]
label = labels[:N, k]
sortind = np.argsort(-scores, kind='mergesort')
truth = label[sortind]
precision = np.cumsum(truth) / num_range
        ap[k] = precision[truth.astype(bool)].sum() / max(1, truth.sum())
return ap
class TestAPMeterOps(hu.HypothesisTestCase):
@given(predictions=hu.arrays(dims=[10, 3],
elements=hu.floats(allow_nan=False,
allow_infinity=False,
min_value=0.1,
max_value=1)),
labels=hu.arrays(dims=[10, 3],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=1)),
**hu.gcs_cpu_only)
def test_average_precision(self, predictions, labels, gc, dc):
op = core.CreateOperator(
"APMeter",
["predictions", "labels"],
["AP"],
buffer_size=10,
)
def op_ref(predictions, labels):
ap = calculate_ap(predictions, labels)
return (ap, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[predictions, labels],
reference=op_ref)
@given(predictions=hu.arrays(dims=[10, 3],
elements=hu.floats(allow_nan=False,
allow_infinity=False,
min_value=0.1,
max_value=1)),
labels=hu.arrays(dims=[10, 3],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=1)),
**hu.gcs_cpu_only)
def test_average_precision_small_buffer(self, predictions, labels, gc, dc):
op_small_buffer = core.CreateOperator(
"APMeter",
["predictions", "labels"],
["AP"],
buffer_size=5,
)
def op_ref(predictions, labels):
# We can only hold the last 5 in the buffer
ap = calculate_ap(predictions[5:], labels[5:])
return (ap, )
self.assertReferenceChecks(
device_option=gc,
op=op_small_buffer,
inputs=[predictions, labels],
reference=op_ref
)
|
pytorch-master
|
caffe2/python/operator_test/apmeter_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestLengthsTileOp(serial.SerializedTestCase):
@serial.given(
inputs=st.integers(min_value=1, max_value=20).flatmap(
lambda size: st.tuples(
hu.arrays([size], dtype=np.float32),
hu.arrays([size], dtype=np.int32,
elements=st.integers(min_value=0, max_value=20)),
)
),
**hu.gcs)
def test_lengths_tile(self, inputs, gc, dc):
data, lengths = inputs
def lengths_tile_op(data, lengths):
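            # Repeat entry i of `data` lengths[i] times, like np.repeat along axis 0.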
return [np.concatenate([
[d] * l for d, l in zip(data, lengths)
])]
op = core.CreateOperator(
"LengthsTile",
["data", "lengths"],
["output"],
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
reference=lengths_tile_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
outputs_to_check=0,
outputs_with_grads=[0]
)
|
pytorch-master
|
caffe2/python/operator_test/lengths_tile_op_test.py
|
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, utils
import caffe2.python.hip_test_util as hiputl
import caffe2.python.hypothesis_test_util as hu
import unittest
class TestGroupConvolution(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
size=st.integers(7, 10),
group=st.integers(1, 4),
input_channels_per_group=st.integers(1, 8),
output_channels_per_group=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
# Note: Eigen does not support group convolution, but it should
# fall back to the default engine without failing.
engine=st.sampled_from(["", "CUDNN", "EIGEN"]),
use_bias=st.booleans(),
**hu.gcs)
@settings(max_examples=2, deadline=None)
def test_group_convolution(
self, stride, pad, kernel, size, group,
input_channels_per_group, output_channels_per_group, batch_size,
order, engine, use_bias, gc, dc):
assume(size >= kernel)
if hiputl.run_in_hip(gc, dc):
if order == "NHWC":
assume(group == 1 and engine != "CUDNN")
else:
# TODO: Group conv in NHWC not implemented for GPU yet.
assume(group == 1 or order == "NCHW" or gc.device_type == caffe2_pb2.CPU)
if group != 1 and order == "NHWC":
dc = [d for d in dc if d.device_type == caffe2_pb2.CPU]
# Group conv not implemented with EIGEN engine.
assume(group == 1 or engine != "EIGEN")
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
engine=engine,
group=group,
)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
w = np.random.rand(
output_channels, kernel, kernel,
input_channels_per_group).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = utils.NHWC2NCHW(X)
w = utils.NHWC2NCHW(w)
inputs = [X, w, b] if use_bias else [X, w]
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/group_conv_test.py
|
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
class SparseDropoutWithReplacementTest(hu.HypothesisTestCase):
@given(**hu.gcs_cpu_only)
def test_no_dropout(self, gc, dc):
X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
replacement_value = -1
self.ws.create_blob("X").feed(X)
self.ws.create_blob("Lengths").feed(Lengths)
sparse_dropout_op = core.CreateOperator(
"SparseDropoutWithReplacement", ["X", "Lengths"], ["Y", "LY"],
ratio=0.0, replacement_value=replacement_value)
self.ws.run(sparse_dropout_op)
Y = self.ws.blobs["Y"].fetch()
OutputLengths = self.ws.blobs["LY"].fetch()
self.assertListEqual(X.tolist(), Y.tolist(),
"Values should stay unchanged")
self.assertListEqual(Lengths.tolist(), OutputLengths.tolist(),
"Lengths should stay unchanged.")
@given(**hu.gcs_cpu_only)
def test_all_dropout(self, gc, dc):
X = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]).astype(np.int64)
Lengths = np.array([2, 2, 2, 2, 2]).astype(np.int32)
replacement_value = -1
self.ws.create_blob("X").feed(X)
self.ws.create_blob("Lengths").feed(Lengths)
sparse_dropout_op = core.CreateOperator(
"SparseDropoutWithReplacement", ["X", "Lengths"], ["Y", "LY"],
ratio=1.0, replacement_value=replacement_value)
self.ws.run(sparse_dropout_op)
y = self.ws.blobs["Y"].fetch()
lengths = self.ws.blobs["LY"].fetch()
for elem in y:
self.assertEqual(elem, replacement_value, "Expected all \
negative elements when dropout ratio is 1.")
for length in lengths:
self.assertEqual(length, 1)
self.assertEqual(sum(lengths), len(y))
@given(**hu.gcs_cpu_only)
def test_all_dropout_empty_input(self, gc, dc):
X = np.array([]).astype(np.int64)
Lengths = np.array([0]).astype(np.int32)
replacement_value = -1
self.ws.create_blob("X").feed(X)
self.ws.create_blob("Lengths").feed(Lengths)
sparse_dropout_op = core.CreateOperator(
"SparseDropoutWithReplacement", ["X", "Lengths"], ["Y", "LY"],
ratio=1.0, replacement_value=replacement_value)
self.ws.run(sparse_dropout_op)
y = self.ws.blobs["Y"].fetch()
lengths = self.ws.blobs["LY"].fetch()
self.assertEqual(len(y), 1, "Expected single dropout value")
self.assertEqual(len(lengths), 1, "Expected single element \
in lengths array")
self.assertEqual(lengths[0], 1, "Expected 1 as sole length")
self.assertEqual(sum(lengths), len(y))
|
pytorch-master
|
caffe2/python/operator_test/sparse_dropout_with_replacement_op_test.py
|
import math
import struct
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.operator_test.fused_nbit_rowwise_test_helper import (
_compress_uniform_simplified,
param_search_greedy,
)
from hypothesis import assume, given, settings
# Eigen/Python round 0.5 away from 0, Numpy rounds to even
round_to_nearest = np.vectorize(round)
def bytes_to_half_floats(byte_matrix):
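    # Reinterpret each row of raw bytes as a single IEEE half-precision float.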
floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float16)
for i, byte_values in enumerate(byte_matrix):
(floats[i],) = np.frombuffer(
memoryview(byte_values).tobytes(), dtype=np.float16
)
return floats
def half_floats_to_bytes(floats):
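    # Reinterpret each fp16 value as its raw two-byte representation.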
byte_matrix = np.empty([np.shape(floats)[0], 2], dtype=np.uint8)
for i, value in enumerate(floats):
assert isinstance(value, np.float16), (value, floats)
byte_matrix[i] = np.frombuffer(
memoryview(np.array([value])).tobytes(), dtype=np.uint8
)
return byte_matrix
def int8_to_bytes(int8s):
byte_matrix = np.empty([np.shape(int8s)[0], 1], dtype=np.uint8)
for i, value in enumerate(int8s):
assert isinstance(value, np.int8), (value, int8s)
as_bytes = struct.pack("b", value)
        # In Python 3, indexing bytes yields ints; in Python 2 it yields one-character strings.
if isinstance(as_bytes[0], int):
byte_matrix[i] = list(as_bytes)
else:
byte_matrix[i] = [ord(i) for i in as_bytes]
return byte_matrix
def fused_rowwise_nbit_quantize_reference(data, bit):
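    # Quantize each row to `bit`-bit integers in [0, 2^bit - 1] using a per-row
    # fp16 scale and bias, pack 8 // bit values per byte, and append the scale
    # and bias bytes to the end of the row.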
minimum = np.min(data, axis=1).astype(np.float16).astype(np.float32)
maximum = np.max(data, axis=1)
span = maximum - minimum
qmax = (1 << bit) - 1
scale = (span / qmax).astype(np.float16).astype(np.float32)
bias = np.zeros(data.shape[0])
quantized_data = np.zeros(data.shape).astype(np.uint8)
for i in range(data.shape[0]):
bias[i] = minimum[i]
inverse_scale = 1.0 if scale[i] == 0.0 else 1 / scale[i]
if scale[i] == 0.0 or math.isinf(inverse_scale):
scale[i] = 1.0
inverse_scale = 1.0
quantized_data[i] = np.clip(
np.round((data[i, :] - minimum[i]) * inverse_scale), 0, qmax
)
# pack
assert 8 % bit == 0
num_elem_per_byte = 8 // bit
packed_dim = (data.shape[1] + num_elem_per_byte - 1) // num_elem_per_byte
packed_data = np.zeros([data.shape[0], packed_dim]).astype(np.uint8)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
if j % num_elem_per_byte == 0:
packed_data[i, j // num_elem_per_byte] = quantized_data[i, j]
else:
packed_data[i, j // num_elem_per_byte] += quantized_data[i, j] << (
(j % num_elem_per_byte) * bit
)
scale_bytes = half_floats_to_bytes(scale.astype(np.float16))
bias_bytes = half_floats_to_bytes(bias.astype(np.float16))
return np.concatenate([packed_data, scale_bytes, bias_bytes], axis=1)
def fused_rowwise_nbit_quantize_dequantize_reference(data, bit):
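    # Round-trip reference: quantize, then recover floats from the packed rows,
    # whose last four bytes hold the fp16 scale and fp16 bias.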
fused_quantized = fused_rowwise_nbit_quantize_reference(data, bit)
scale = bytes_to_half_floats(fused_quantized[:, -4:-2].astype(np.uint8)).astype(
np.float32
)
bias = bytes_to_half_floats(fused_quantized[:, -2:].astype(np.uint8)).astype(
np.float32
)
quantized_data = fused_quantized[:, :-4]
# unpack
packed_dim = fused_quantized.shape[1] - 4
assert 8 % bit == 0
num_elem_per_byte = 8 // bit
assert packed_dim == ((data.shape[1] + num_elem_per_byte - 1) // num_elem_per_byte)
unpacked_data = np.zeros(data.shape).astype(np.uint8)
for i in range(data.shape[0]):
for j in range(data.shape[1]):
unpacked_data[i, j] = (
quantized_data[i, j // num_elem_per_byte]
>> ((j % num_elem_per_byte) * bit)
) & ((1 << bit) - 1)
return scale * unpacked_data + bias
class TestFusedNBitRowwiseQuantizationConversion(hu.HypothesisTestCase):
@given(input_data=hu.tensor(min_dim=2, max_dim=2), bit_rate=st.sampled_from([2, 4]))
def test_quantize_op(self, input_data, bit_rate):
assert 8 % bit_rate == 0
num_elem_per_byte = 8 // bit_rate
assume(input_data.shape[1] % num_elem_per_byte == 0)
quantize = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
["input_data"],
["quantized_data"],
)
workspace.FeedBlob("input_data", input_data)
workspace.RunOperatorOnce(quantize)
quantized_data = workspace.FetchBlob("quantized_data")
reference = fused_rowwise_nbit_quantize_reference(
input_data.astype(np.float32), bit_rate
)
interleaved_dim = input_data.shape[1] // num_elem_per_byte
# compare quantized data
np.testing.assert_array_equal(
quantized_data[:, :interleaved_dim], reference[:, :interleaved_dim]
)
# compare scales
np.testing.assert_array_almost_equal(
bytes_to_half_floats(
quantized_data[:, interleaved_dim : interleaved_dim + 2]
),
bytes_to_half_floats(reference[:, interleaved_dim : interleaved_dim + 2]),
)
# compare zero points
np.testing.assert_array_equal(
quantized_data[:, interleaved_dim + 2], reference[:, interleaved_dim + 2]
)
@given(
batch_size=st.integers(1, 100),
block_size=st.integers(1, 100),
bit_rate=st.sampled_from([2, 4]),
)
def test_quantize_and_dequantize_op(self, batch_size, block_size, bit_rate):
assert 8 % bit_rate == 0
num_elem_per_byte = 8 // bit_rate
input_data = np.random.rand(batch_size, block_size).astype(np.float32)
assume(input_data.shape[1] % num_elem_per_byte == 0)
quantize = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
["input_data"],
["quantized_data"],
)
workspace.FeedBlob("input_data", input_data)
workspace.RunOperatorOnce(quantize)
quantized_data = workspace.FetchBlob("quantized_data")
dequantize = core.CreateOperator(
"Fused" + str(bit_rate) + "BitRowwiseQuantizedToFloat",
["quantized_data"],
["dequantized_data"],
)
workspace.FeedBlob("quantized_data", quantized_data)
workspace.RunOperatorOnce(dequantize)
dequantized_data = workspace.FetchBlob("dequantized_data")
reference = fused_rowwise_nbit_quantize_dequantize_reference(
input_data, bit_rate
)
np.testing.assert_array_almost_equal(dequantized_data, reference)
def ErrorThresholdRow(X, bit_rate):
    # Per-row upper bound on the representable error at this bit rate: the larger
    # of half an LSB of rounding error and the clipping error introduced by the
    # fp16 scale/bias, with a 10% margin.
min_elem = np.min(X, axis=1)
max_elem = np.max(X, axis=1)
bias = np.float16(min_elem)
scale = np.float16((max_elem - bias) / ((1 << bit_rate) - 1))
max_round_error = scale / 2
max_clip_error = np.maximum(
np.abs(min_elem - bias), np.abs(scale * ((1 << bit_rate) - 1) + bias - max_elem)
)
thres = np.maximum(max_round_error, max_clip_error) * 1.1
return thres
class TestNBitFakeFused(hu.HypothesisTestCase):
@given(bit_rate=st.sampled_from([2, 4]))
@settings(deadline=10000)
def testNBit(self, bit_rate):
# uncomment for debugging
# np.random.seed(0)
net = core.Net("bench")
batchsize = np.random.randint(2, 1000)
blocksize = np.random.randint(2, 1000)
input_data = np.random.rand(batchsize, blocksize).astype(np.float32)
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitFakeRowwiseQuantized",
"input_data",
"minmax_quantized_data",
)
net.Proto().op.extend([op])
net.Fused8BitRowwiseQuantizedToFloat(
"minmax_quantized_data", "minmax_dequantized_data"
)
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitFakeRowwiseQuantized",
"input_data",
"greedy_quantized_data",
engine="GREEDY",
)
net.Proto().op.extend([op])
net.Fused8BitRowwiseQuantizedToFloat(
"greedy_quantized_data", "greedy_dequantized_data"
)
workspace.FeedBlob("input_data", input_data)
workspace.GlobalInit(["caffe2", "--caffe2_log_level=0"])
workspace.RunNetOnce(net)
minmax_dequantized_data = workspace.FetchBlob("minmax_dequantized_data")
greedy_dequantized_data = workspace.FetchBlob("greedy_dequantized_data")
err_thres = ErrorThresholdRow(input_data, bit_rate)
diff_minmax = np.abs(input_data - minmax_dequantized_data)
diff_greedy = np.abs(input_data - greedy_dequantized_data)
for i in range(err_thres.size):
# Check error from minmax quantization is within the bound derived from the range
assert (
np.sum(diff_minmax[i, :] > err_thres[i]) == 0
), "error at row {} too high (diff_minmax[i, :] {} diff_minmax[i, :] > err_thres[i] {} err_thres[i] {}".format(
i, diff_minmax[i, :], diff_minmax[i, :] > err_thres[i], err_thres[i]
)
            # Check that the error from greedy quantization is no larger than from
            # minmax quantization. Multiply by a 1.03 margin to allow for
            # floating-point inexactness and binning effects (in exact math,
            # l2_greedy should be no greater than l2_minmax).
l2_minmax_err = np.linalg.norm(diff_minmax[i, :])
l2_greedy_err = np.linalg.norm(diff_greedy[i, :])
assert (
l2_greedy_err <= l2_minmax_err * 1.03
), "L2 quantization error using greedy algorithm {} at row {} is bigger than error using minmax {} (input_data[i,:] {} minmax_dequantized_data[i,:] {} greedy_dequantized_data[i,:] {}".format( # noqa
l2_greedy_err,
i,
l2_minmax_err,
input_data[i, :],
minmax_dequantized_data[i, :],
greedy_dequantized_data[i, :],
)
class TestNBitGreedyFused(hu.HypothesisTestCase):
@given(bit_rate=st.sampled_from([2, 4]))
@settings(deadline=None, max_examples=50)
def testNBit(self, bit_rate):
# uncomment for debugging
# np.random.seed(0)
net = core.Net("bench")
batchsize = np.random.randint(2, 1000)
assert 8 % bit_rate == 0
num_elem_per_byte = 8 // bit_rate
blocksize = np.random.randint(2, 500) * num_elem_per_byte
input_data = np.random.rand(batchsize, blocksize).astype(np.float32)
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
"input_data",
"minmax_quantized_data",
)
net.Proto().op.extend([op])
op = core.CreateOperator(
"Fused" + str(bit_rate) + "BitRowwiseQuantizedToFloat",
"minmax_quantized_data",
"minmax_dequantized_data",
)
net.Proto().op.extend([op])
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
"input_data",
"greedy_quantized_data",
engine="GREEDY",
)
net.Proto().op.extend([op])
op = core.CreateOperator(
"Fused" + str(bit_rate) + "BitRowwiseQuantizedToFloat",
"greedy_quantized_data",
"greedy_dequantized_data",
)
net.Proto().op.extend([op])
workspace.FeedBlob("input_data", input_data)
workspace.GlobalInit(["caffe2", "--caffe2_log_level=0"])
workspace.RunNetOnce(net)
minmax_dequantized_data = workspace.FetchBlob("minmax_dequantized_data")
greedy_dequantized_data = workspace.FetchBlob("greedy_dequantized_data")
diff_minmax = np.abs(input_data - minmax_dequantized_data)
l2_minmax = np.linalg.norm(input_data - minmax_dequantized_data, axis=1)
diff_greedy = np.abs(input_data - greedy_dequantized_data)
l2_greedy = np.linalg.norm(input_data - greedy_dequantized_data, axis=1)
for i in range(input_data.shape[0]):
# Compare with Python reference greedy search implementation
xmin, xmax = param_search_greedy(
input_data[i, :], bit_rate, n_bins=200, ratio=0.16
)
X_q_ref, l2_greedy_ref = _compress_uniform_simplified(
input_data[i, :], bit_rate, xmin, xmax, fp16_scale_bias=True
)
l2_discrepancy = np.abs(l2_greedy[i] - l2_greedy_ref) / input_data.shape[1]
            # The C++ implementation accumulates in a different order when computing
            # the norm in compress_uniform_simplified_, so we shouldn't use too
            # small a tolerance.
assert (
l2_discrepancy < 1e-5
), "l2_discrepancy between C++ and Python greedy algorithm {} at row {} is too high (actual l2 err {} ref l2 err {} actual {} ref {})".format( # noqa
l2_discrepancy,
i,
l2_greedy[i],
l2_greedy_ref,
greedy_dequantized_data[i, :],
X_q_ref,
)
            # Check that the error from greedy quantization is no larger than from
            # minmax quantization. Multiply by a 1.03 margin to allow for
            # floating-point inexactness and binning effects (in exact math,
            # l2_greedy should be no greater than l2_minmax).
assert (
l2_greedy[i] <= l2_minmax[i] * 1.03
), "L2 quantization error using greedy algorithm {} at row {} is bigger than error using minmax {}".format(
l2_greedy[i], i, l2_minmax[i]
)
|
pytorch-master
|
caffe2/python/operator_test/fused_nbit_rowwise_conversion_ops_test.py
|
from caffe2.python import core
from collections import defaultdict, Counter
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
DEFAULT_BEAM_WIDTH = 10
DEFAULT_PRUNE_THRESHOLD = 0.001
class TestCTCBeamSearchDecoderOp(serial.SerializedTestCase):
@given(
batch=st.sampled_from([1, 2, 4]),
max_time=st.sampled_from([1, 8, 64]),
alphabet_size=st.sampled_from([1, 2, 32, 128, 512]),
beam_width=st.sampled_from([1, 2, 16, None]),
num_candidates=st.sampled_from([1, 2]),
**hu.gcs_cpu_only
)
@settings(deadline=None, max_examples=30)
def test_ctc_beam_search_decoder(
self, batch, max_time, alphabet_size, beam_width, num_candidates, gc, dc
):
if not beam_width:
beam_width = DEFAULT_BEAM_WIDTH
op_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
['INPUTS', 'SEQ_LEN'],
['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],
num_candidates=num_candidates)
op_no_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
['INPUTS'],
['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],
num_candidates=num_candidates)
else:
num_candidates = min(num_candidates, beam_width)
op_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
['INPUTS', 'SEQ_LEN'],
['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],
beam_width=beam_width,
num_candidates=num_candidates)
op_no_seq_len = core.CreateOperator('CTCBeamSearchDecoder',
['INPUTS'],
['OUTPUT_LEN', 'VALUES', 'OUTPUT_PROB'],
beam_width=beam_width,
num_candidates=num_candidates)
        def input_generator():
inputs = np.random.rand(max_time, batch, alphabet_size)\
.astype(np.float32)
seq_len = np.random.randint(1, max_time + 1, size=batch)\
.astype(np.int32)
return inputs, seq_len
def ref_ctc_decoder(inputs, seq_len):
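            # Prefix beam search over the CTC posteriors: Pb[t][l] is the
            # probability of prefix l at time t ending in blank, Pnb[t][l] of it
            # ending in a non-blank; a prefix is scored by Pb[t][l] + Pnb[t][l].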
output_len = np.zeros(batch * num_candidates, dtype=np.int32)
output_prob = np.zeros(batch * num_candidates, dtype=np.float32)
val = np.array([]).astype(np.int32)
for i in range(batch):
Pb, Pnb = defaultdict(Counter), defaultdict(Counter)
Pb[0][()] = 1
Pnb[0][()] = 0
A_prev = [()]
ctc = inputs[:, i, :]
ctc = np.vstack((np.zeros(alphabet_size), ctc))
len_i = seq_len[i] if seq_len is not None else max_time
for t in range(1, len_i + 1):
pruned_alphabet = np.where(ctc[t] > DEFAULT_PRUNE_THRESHOLD)[0]
for l in A_prev:
for c in pruned_alphabet:
if c == 0:
Pb[t][l] += ctc[t][c] * (Pb[t - 1][l] + Pnb[t - 1][l])
else:
l_plus = l + (c,)
if len(l) > 0 and c == l[-1]:
Pnb[t][l_plus] += ctc[t][c] * Pb[t - 1][l]
Pnb[t][l] += ctc[t][c] * Pnb[t - 1][l]
else:
Pnb[t][l_plus] += \
ctc[t][c] * (Pb[t - 1][l] + Pnb[t - 1][l])
if l_plus not in A_prev:
Pb[t][l_plus] += \
ctc[t][0] * \
(Pb[t - 1][l_plus] + Pnb[t - 1][l_plus])
Pnb[t][l_plus] += ctc[t][c] * Pnb[t - 1][l_plus]
A_next = Pb[t] + Pnb[t]
A_prev = sorted(A_next, key=A_next.get, reverse=True)
A_prev = A_prev[:beam_width]
candidates = A_prev[:num_candidates]
index = 0
for candidate in candidates:
val = np.hstack((val, candidate))
output_len[i * num_candidates + index] = len(candidate)
output_prob[i * num_candidates + index] = Pb[t][candidate] + Pnb[t][candidate]
index += 1
return [output_len, val, output_prob]
def ref_ctc_decoder_max_time(inputs):
return ref_ctc_decoder(inputs, None)
        inputs, seq_len = input_generator()
self.assertReferenceChecks(
device_option=gc,
op=op_seq_len,
inputs=[inputs, seq_len],
reference=ref_ctc_decoder,
)
self.assertReferenceChecks(
device_option=gc,
op=op_no_seq_len,
inputs=[inputs],
reference=ref_ctc_decoder_max_time,
)
if __name__ == "__main__":
import random
random.seed(2603)
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/ctc_beam_search_decoder_op_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestLossOps(serial.SerializedTestCase):
@serial.given(n=st.integers(1, 8), **hu.gcs)
def test_averaged_loss(self, n, gc, dc):
X = np.random.rand(n).astype(np.float32)
def avg_op(X):
return [np.mean(X)]
op = core.CreateOperator(
"AveragedLoss",
["X"],
["y"],
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=avg_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
|
pytorch-master
|
caffe2/python/operator_test/loss_ops_test.py
|
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import tempfile
class TestCounterOps(TestCase):
def test_counter_ops(self):
workspace.RunOperatorOnce(core.CreateOperator(
'CreateCounter', [], ['c'], init_count=1))
workspace.RunOperatorOnce(core.CreateOperator(
'CountDown', ['c'], ['t1'])) # 1 -> 0
assert not workspace.FetchBlob('t1')
workspace.RunOperatorOnce(core.CreateOperator(
'CountDown', ['c'], ['t2'])) # 0 -> -1
assert workspace.FetchBlob('t2')
workspace.RunOperatorOnce(core.CreateOperator(
'CountUp', ['c'], ['t21'])) # -1 -> 0
assert workspace.FetchBlob('t21') == -1
workspace.RunOperatorOnce(core.CreateOperator(
'RetrieveCount', ['c'], ['t22']))
assert workspace.FetchBlob('t22') == 0
workspace.RunOperatorOnce(core.CreateOperator(
'ResetCounter', ['c'], [], init_count=1)) # -> 1
workspace.RunOperatorOnce(core.CreateOperator(
'CountDown', ['c'], ['t3'])) # 1 -> 0
assert not workspace.FetchBlob('t3')
workspace.RunOperatorOnce(core.CreateOperator(
'ResetCounter', ['c'], ['t31'], init_count=5)) # 0 -> 5
assert workspace.FetchBlob('t31') == 0
workspace.RunOperatorOnce(core.CreateOperator(
'ResetCounter', ['c'], ['t32'])) # 5 -> 0
assert workspace.FetchBlob('t32') == 5
workspace.RunOperatorOnce(core.CreateOperator(
'ConstantFill', [], ['t4'], value=False, shape=[],
dtype=core.DataType.BOOL))
assert workspace.FetchBlob('t4') == workspace.FetchBlob('t1')
workspace.RunOperatorOnce(core.CreateOperator(
'ConstantFill', [], ['t5'], value=True, shape=[],
dtype=core.DataType.BOOL))
assert workspace.FetchBlob('t5') == workspace.FetchBlob('t2')
assert workspace.RunOperatorOnce(core.CreateOperator(
'And', ['t1', 't2'], ['t6']))
assert not workspace.FetchBlob('t6') # True && False
assert workspace.RunOperatorOnce(core.CreateOperator(
'And', ['t2', 't5'], ['t7']))
assert workspace.FetchBlob('t7') # True && True
workspace.RunOperatorOnce(core.CreateOperator(
'CreateCounter', [], ['serialized_c'], init_count=22))
with tempfile.NamedTemporaryFile() as tmp:
workspace.RunOperatorOnce(core.CreateOperator(
'Save', ['serialized_c'], [], absolute_path=1,
db_type='minidb', db=tmp.name))
for i in range(10):
workspace.RunOperatorOnce(core.CreateOperator(
'CountDown', ['serialized_c'], ['t8']))
workspace.RunOperatorOnce(core.CreateOperator(
'RetrieveCount', ['serialized_c'], ['t8']))
assert workspace.FetchBlob('t8') == 12
workspace.RunOperatorOnce(core.CreateOperator(
'Load', [], ['serialized_c'], absolute_path=1,
db_type='minidb', db=tmp.name))
workspace.RunOperatorOnce(core.CreateOperator(
'RetrieveCount', ['serialized_c'], ['t8']))
assert workspace.FetchBlob('t8') == 22
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/counter_ops_test.py
|
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import numpy as np
class TestCastOp(hu.HypothesisTestCase):
@given(**hu.gcs)
def test_cast_int_float(self, gc, dc):
data = np.random.rand(5, 5).astype(np.int32)
# from int to float
op = core.CreateOperator('Cast', 'data', 'data_cast', to=1, from_type=2)
self.assertDeviceChecks(dc, op, [data], [0])
# This is actually 0
self.assertGradientChecks(gc, op, [data], 0, [0])
@given(**hu.gcs)
def test_cast_int_float_empty(self, gc, dc):
data = np.random.rand(0).astype(np.int32)
# from int to float
op = core.CreateOperator('Cast', 'data', 'data_cast', to=1, from_type=2)
self.assertDeviceChecks(dc, op, [data], [0])
# This is actually 0
self.assertGradientChecks(gc, op, [data], 0, [0])
@given(data=hu.tensor(dtype=np.int32), **hu.gcs_cpu_only)
def test_cast_int_to_string(self, data, gc, dc):
op = core.CreateOperator(
'Cast', 'data', 'data_cast', to=core.DataType.STRING)
def ref(data):
            ret = data.astype(dtype=str)
# the string blob will be fetched as object, we feed and re-fetch
# to mimic this.
with hu.temp_workspace('tmp_ref_int_to_string'):
workspace.FeedBlob('tmp_blob', ret)
fetched_ret = workspace.FetchBlob('tmp_blob')
return (fetched_ret, )
self.assertReferenceChecks(gc, op, inputs=[data], reference=ref)
|
pytorch-master
|
caffe2/python/operator_test/cast_op_test.py
|
from caffe2.python import model_helper, workspace, core, rnn_cell
from future.utils import viewitems
import numpy as np
import unittest
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
class TestLSTMs(unittest.TestCase):
def testEqualToCudnn(self):
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType)):
T = 8
batch_size = 4
input_dim = 8
hidden_dim = 31
workspace.FeedBlob(
"seq_lengths",
np.array([T] * batch_size, dtype=np.int32)
)
workspace.FeedBlob("target", np.zeros(
[T, batch_size, hidden_dim], dtype=np.float32
))
workspace.FeedBlob("hidden_init", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
workspace.FeedBlob("cell_init", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
own_model = model_helper.ModelHelper(name="own_lstm")
input_shape = [T, batch_size, input_dim]
cudnn_model = model_helper.ModelHelper(name="cudnn_lstm")
input_blob = cudnn_model.param_init_net.UniformFill(
[], "input", shape=input_shape)
workspace.FeedBlob("CUDNN/hidden_init_cudnn", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
workspace.FeedBlob("CUDNN/cell_init_cudnn", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
cudnn_output, cudnn_last_hidden, cudnn_last_state, param_extract = rnn_cell.cudnn_LSTM(
model=cudnn_model,
input_blob=input_blob,
initial_states=("hidden_init_cudnn", "cell_init_cudnn"),
dim_in=input_dim,
dim_out=hidden_dim,
scope="CUDNN",
return_params=True,
)
cudnn_loss = cudnn_model.AveragedLoss(
cudnn_model.SquaredL2Distance(
[cudnn_output, "target"], "CUDNN/dist"
), "CUDNN/loss"
)
own_output, own_last_hidden, _, own_last_state, own_params = rnn_cell.LSTM(
model=own_model,
input_blob=input_blob,
seq_lengths="seq_lengths",
initial_states=("hidden_init", "cell_init"),
dim_in=input_dim,
dim_out=hidden_dim,
scope="OWN",
return_params=True,
)
own_loss = own_model.AveragedLoss(
own_model.SquaredL2Distance([own_output, "target"], "OWN/dist"),
"OWN/loss"
)
# Add gradients
cudnn_model.AddGradientOperators([cudnn_loss])
own_model.AddGradientOperators([own_loss])
# Add parameter updates
LR = cudnn_model.param_init_net.ConstantFill(
[], shape=[1], value=0.01
)
ONE = cudnn_model.param_init_net.ConstantFill(
[], shape=[1], value=1.0
)
for param in cudnn_model.GetParams():
cudnn_model.WeightedSum(
[param, ONE, cudnn_model.param_to_grad[param], LR], param
)
for param in own_model.GetParams():
own_model.WeightedSum(
[param, ONE, own_model.param_to_grad[param], LR], param
)
# Copy states over
own_model.net.Copy(own_last_hidden, "hidden_init")
own_model.net.Copy(own_last_state, "cell_init")
cudnn_model.net.Copy(cudnn_last_hidden, "CUDNN/hidden_init_cudnn")
cudnn_model.net.Copy(cudnn_last_state, "CUDNN/cell_init_cudnn")
workspace.RunNetOnce(cudnn_model.param_init_net)
workspace.CreateNet(cudnn_model.net)
##
## CUDNN LSTM MODEL EXECUTION
##
# Get initial values from CuDNN LSTM so we can feed them
# to our own.
(param_extract_net, param_extract_mapping) = param_extract
workspace.RunNetOnce(param_extract_net)
cudnn_lstm_params = {
input_type: {
k: workspace.FetchBlob(v[0])
for k, v in viewitems(pars)
}
for input_type, pars in viewitems(param_extract_mapping)
}
# Run the model 3 times, so that some parameter updates are done
workspace.RunNet(cudnn_model.net.Proto().name, 3)
##
## OWN LSTM MODEL EXECUTION
##
# Map the cuDNN parameters to our own
workspace.RunNetOnce(own_model.param_init_net)
rnn_cell.InitFromLSTMParams(own_params, cudnn_lstm_params)
# Run the model 3 times, so that some parameter updates are done
workspace.CreateNet(own_model.net)
workspace.RunNet(own_model.net.Proto().name, 3)
##
## COMPARE RESULTS
##
# Then compare that final results after 3 runs are equal
own_output_data = workspace.FetchBlob(own_output)
own_last_hidden = workspace.FetchBlob(own_last_hidden)
own_loss = workspace.FetchBlob(own_loss)
cudnn_output_data = workspace.FetchBlob(cudnn_output)
cudnn_last_hidden = workspace.FetchBlob(cudnn_last_hidden)
cudnn_loss = workspace.FetchBlob(cudnn_loss)
self.assertTrue(np.allclose(own_output_data, cudnn_output_data))
self.assertTrue(np.allclose(own_last_hidden, cudnn_last_hidden))
self.assertTrue(np.allclose(own_loss, cudnn_loss))
|
pytorch-master
|
caffe2/python/operator_test/cudnn_recurrent_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
class TestLengthsPadOp(serial.SerializedTestCase):
@serial.given(
inputs=hu.lengths_tensor(
dtype=np.float32,
min_value=1,
max_value=5,
allow_empty=True,
),
delta_length=st.integers(0, 10),
padding_value=st.floats(-10.0, 10.0),
**hu.gcs
)
def test_lengths_pad(self, inputs, delta_length, padding_value, gc, dc):
data, lengths = inputs
max_length = np.max(lengths) if len(lengths) > 0 else 0
target_length = max(max_length + delta_length, 1)
def lengths_pad_op(data, lengths):
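            # Pad each length-delimited segment of `data` to `target_length` rows,
            # filling the tail with `padding_value`.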
N = len(lengths)
output = np.ndarray(
shape=(target_length * N, ) + data.shape[1:], dtype=np.float32)
output.fill(padding_value)
ptr1, ptr2 = 0, 0
for i in range(N):
output[ptr1:ptr1 + lengths[i]] = data[ptr2:ptr2 + lengths[i]]
ptr1 += target_length
ptr2 += lengths[i]
return [output]
op = core.CreateOperator(
"LengthsPad",
["data", "lengths"],
["data_padded"],
target_length=target_length,
padding_value=padding_value,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
reference=lengths_pad_op,
)
|
pytorch-master
|
caffe2/python/operator_test/lengths_pad_op_test.py
|
import numpy as np
from hypothesis import given, settings
import hypothesis.strategies as st
import unittest
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
class TestTile(serial.SerializedTestCase):
@given(M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
tiles=st.integers(min_value=1, max_value=3),
axis=st.integers(min_value=0, max_value=2),
**hu.gcs)
@settings(deadline=10000)
def test_tile(self, M, K, N, tiles, axis, gc, dc):
X = np.random.rand(M, K, N).astype(np.float32)
op = core.CreateOperator(
'Tile', ['X'], 'out',
tiles=tiles,
axis=axis,
)
def tile_ref(X, tiles, axis):
            dims = np.asarray([1, 1, 1], dtype=int)
dims[axis] = tiles
tiled_data = np.tile(X, dims)
return (tiled_data,)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [X, tiles, axis],
tile_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X], 0, [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(M=st.integers(min_value=1, max_value=200),
N=st.integers(min_value=1, max_value=200),
tiles=st.integers(min_value=50, max_value=100),
**hu.gcs)
def test_tile_grad(self, M, N, tiles, gc, dc):
X = np.random.rand(M, N).astype(np.float32)
axis = 1
op = core.CreateOperator(
'Tile', ['X'], 'out',
tiles=tiles,
axis=axis,
)
def tile_ref(X, tiles, axis):
            dims = np.asarray([1, 1], dtype=int)
dims[axis] = tiles
tiled_data = np.tile(X, dims)
return (tiled_data,)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [X, tiles, axis],
tile_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
# Gradient check wrt X
grad_op = core.CreateOperator(
'TileGradient', ['dOut'], 'dX',
tiles=tiles,
axis=axis,
)
dX = np.random.rand(M, N * tiles).astype(np.float32)
self.assertDeviceChecks(dc, grad_op, [dX], [0])
@given(M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
tiles=st.integers(min_value=1, max_value=3),
axis=st.integers(min_value=0, max_value=2),
**hu.gcs)
@settings(deadline=10000)
def test_tilewinput(self, M, K, N, tiles, axis, gc, dc):
X = np.random.rand(M, K, N).astype(np.float32)
tiles_arg = np.array([tiles], dtype=np.int32)
axis_arg = np.array([axis], dtype=np.int32)
op = core.CreateOperator(
'Tile', ['X', 'tiles', 'axis'], 'out',
)
def tile_ref(X, tiles, axis):
            dims = np.asarray([1, 1, 1], dtype=int)
dims[axis] = tiles
tiled_data = np.tile(X, dims)
return (tiled_data,)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [X, tiles_arg, axis_arg],
tile_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X, tiles_arg, axis_arg], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X, tiles_arg, axis_arg], 0, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/tile_op_test.py
|
from caffe2.python import core, workspace
from hypothesis import assume, given, settings
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import random
class TestUtilityOps(serial.SerializedTestCase):
@given(X=hu.tensor(), args=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_slice(self, X, args, gc, dc):
X = X.astype(dtype=np.float32)
dim = random.randint(0, X.ndim - 1)
slice_start = random.randint(0, X.shape[dim] - 1)
slice_end = random.randint(slice_start, X.shape[dim] - 1)
starts = np.array([0] * X.ndim).astype(np.int32)
ends = np.array([-1] * X.ndim).astype(np.int32)
starts[dim] = slice_start
ends[dim] = slice_end
if args:
op = core.CreateOperator(
"Slice", ["X"], ["Y"], starts=starts, ends=ends, device_option=gc
)
def slice_ref(X):
slc = [slice(None)] * X.ndim
slc[dim] = slice(slice_start, slice_end)
return [X[slc]]
inputs = [X]
else:
op = core.CreateOperator(
"Slice", ["X", "starts", "ends"], ["Y"], device_option=gc
)
def slice_ref(x, starts, ends):
slc = [slice(None)] * x.ndim
slc[dim] = slice(slice_start, slice_end)
return [x[slc]]
inputs = [X, starts, ends]
self.assertReferenceChecks(gc, op, inputs, slice_ref)
self.assertDeviceChecks(dc, op, inputs, [0])
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=inputs,
outputs_to_check=0,
outputs_with_grads=[0],
)
@given(ndims=st.integers(min_value=1, max_value=10), **hu.gcs)
@settings(deadline=10000)
def test_resize_like(self, ndims, gc, dc):
X = np.zeros((ndims * 2, ))
Y = np.zeros((ndims, 2))
op = core.CreateOperator(
"ResizeLike", ["X", "Y"], ["Z"],
)
def resize_like(X, Y):
return [X.reshape(Y.shape)]
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertReferenceChecks(gc, op, [X, Y], resize_like, ensure_outputs_are_inferred=True)
@given(dtype=st.sampled_from([np.float32, np.int32]),
ndims=st.integers(min_value=1, max_value=5),
seed=st.integers(min_value=0, max_value=65536),
null_axes=st.booleans(),
engine=st.sampled_from(['CUDNN', None]),
**hu.gcs)
@settings(deadline=10000)
def test_transpose(self, dtype, ndims, seed, null_axes, engine, gc, dc):
if (gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN"):
# cudnn 5.1 does not support int.
assume(workspace.GetCuDNNVersion() >= 6000 or dtype != np.int32)
dims = (np.random.rand(ndims) * 16 + 1).astype(np.int32)
X = (np.random.rand(*dims) * 16).astype(dtype)
if null_axes:
axes = None
op = core.CreateOperator(
"Transpose",
["input"], ["output"],
engine=engine)
else:
np.random.seed(int(seed))
axes = [int(v) for v in list(np.random.permutation(X.ndim))]
op = core.CreateOperator(
"Transpose",
["input"], ["output"],
axes=axes,
engine=engine)
def transpose_ref(x, axes):
return (np.transpose(x, axes),)
self.assertReferenceChecks(gc, op, [X, axes],
transpose_ref)
@given(m=st.integers(5, 10), n=st.integers(5, 10),
o=st.integers(5, 10), nans=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_nan_check(self, m, n, o, nans, gc, dc):
other = np.array([1, 2, 3]).astype(np.float32)
X = np.random.rand(m, n, o).astype(np.float32)
if nans:
x_nan = np.random.randint(0, m)
y_nan = np.random.randint(0, n)
z_nan = np.random.randint(0, o)
X[x_nan, y_nan, z_nan] = float('NaN')
# print('nans: {}'.format(nans))
# print(X)
def nan_reference(X, Y):
if not np.isnan(X).any():
return [X]
else:
return [np.array([])]
op = core.CreateOperator(
"NanCheck",
["X", "other"],
["Y"]
)
try:
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, other],
reference=nan_reference,
)
if nans:
self.assertTrue(False, "Did not fail when presented with NaN!")
except RuntimeError:
self.assertTrue(nans, "No NaNs but failed")
try:
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
if nans:
self.assertTrue(False, "Did not fail when gradient had NaN!")
except RuntimeError:
pass
@serial.given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
def test_elementwise_max(self, n, m, d, gc, dc):
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32)
Z = np.random.rand(n, m, d).astype(np.float32)
inputs = [X, Y, Z]
def max_op(X, Y, Z):
return [np.maximum(np.maximum(X, Y), Z)]
op = core.CreateOperator(
"Max",
["X", "Y", "Z"],
["mx"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=max_op,
)
self.assertDeviceChecks(dc, op, inputs, [0])
@given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
@settings(deadline=10000)
def test_elementwise_max_grad(self, n, m, d, gc, dc):
go = np.random.rand(n, m, d).astype(np.float32)
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32)
Z = np.random.rand(n, m, d).astype(np.float32)
mx = np.maximum(np.maximum(X, Y), Z)
inputs = [mx, go, X, Y, Z]
def max_grad_op(mx, go, X, Y, Z):
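            # Route the upstream gradient `go` to every input that attains the
            # elementwise maximum; tied inputs each receive the full gradient.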
def mx_grad(a):
return go * (mx == a)
return [mx_grad(a) for a in [X, Y, Z]]
op = core.CreateOperator(
"MaxGradient",
["mx", "go", "X", "Y", "Z"],
["gX", "gY", "gZ"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=max_grad_op,
)
self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
@serial.given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
def test_elementwise_min(self, n, m, d, gc, dc):
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32)
Z = np.random.rand(n, m, d).astype(np.float32)
inputs = [X, Y, Z]
def min_op(X, Y, Z):
return [np.minimum(np.minimum(X, Y), Z)]
op = core.CreateOperator(
"Min",
["X", "Y", "Z"],
["mx"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=min_op,
)
self.assertDeviceChecks(dc, op, inputs, [0])
@given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
@settings(deadline=10000)
def test_elementwise_min_grad(self, n, m, d, gc, dc):
go = np.random.rand(n, m, d).astype(np.float32)
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32)
Z = np.random.rand(n, m, d).astype(np.float32)
mx = np.minimum(np.minimum(X, Y), Z)
inputs = [mx, go, X, Y, Z]
def min_grad_op(mx, go, X, Y, Z):
def mx_grad(a):
return go * (mx == a)
return [mx_grad(a) for a in [X, Y, Z]]
op = core.CreateOperator(
"MinGradient",
["mx", "go", "X", "Y", "Z"],
["gX", "gY", "gZ"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=min_grad_op,
)
self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
@given(
n=st.integers(1, 8), m=st.integers(1, 10), d=st.integers(1, 4),
in_place=st.booleans(), engine=st.sampled_from(["", "CUDNN"]),
seed=st.integers(min_value=0, max_value=65535),
dtype=st.sampled_from([np.int32, np.int64, np.float32]),
**hu.gcs)
@settings(deadline=10000)
def test_sum(
self, n, m, d, in_place, engine, seed, dtype, gc, dc):
input_names = []
input_vars = []
np.random.seed(seed)
for i in range(m):
X_name = 'X' + str(i)
input_names.extend([X_name])
var = np.random.rand(n, d).astype(dtype)
vars()[X_name] = var
input_vars.append(var)
def sum_op_ref(*args):
res = np.zeros((n, d))
for i in range(m):
res = res + args[i]
return (res, )
op = core.CreateOperator(
"Sum",
input_names,
[input_names[0]] if in_place else ['Y'],
engine=engine,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=input_vars,
reference=sum_op_ref,
)
self.assertDeviceChecks(dc, op, input_vars, [0])
@given(
inputs=hu.lengths_tensor().flatmap(
lambda pair: st.tuples(
st.just(pair[0]),
st.just(pair[1]),
hu.dims(max_value=len(pair[1])),
)
).flatmap(
lambda tup: st.tuples(
st.just(tup[0]),
st.just(tup[1]),
hu.arrays(
tup[2], dtype=np.int32,
elements=st.integers(
min_value=0, max_value=len(tup[1]) - 1)),
)
),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_gather(self, inputs, gc, dc):
items = inputs[0]
lengths = inputs[1]
indices = inputs[2]
def lengths_gather_op(items, lengths, indices):
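            # Gather whole segments: segment i covers items[ends[i] - lengths[i]:ends[i]].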
ends = np.cumsum(lengths)
return [np.concatenate(
list(items[ends[i] - lengths[i]:ends[i]] for i in indices))]
op = core.CreateOperator(
"LengthsGather",
["items", "lengths", "indices"],
["output"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[items, lengths, indices],
reference=lengths_gather_op,
)
@given(
inputs=hu.lengths_tensor(),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_lengths_to_ranges(self, inputs, gc, dc):
_, lengths = inputs
def lengths_to_ranges_op(lengths):
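            # Convert lengths into [start, length] pairs; the starts are the
            # exclusive prefix sums of the lengths.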
return [
[[x, y] for x, y in zip(np.cumsum(np.append([0], lengths)),
lengths)]
]
op = core.CreateOperator(
"LengthsToRanges",
["lengths"],
["output"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[lengths],
reference=lengths_to_ranges_op,
)
# Test shape inference logic
net = core.Net("test_shape_inference")
workspace.FeedBlob("lengths", lengths)
output = net.LengthsToRanges(
["lengths"],
["output"]
)
(shapes, types) = workspace.InferShapesAndTypes([net])
workspace.RunNetOnce(net)
self.assertEqual(shapes[output], list(workspace.blobs[output].shape))
self.assertEqual(shapes[output], list(lengths.shape) + [2])
self.assertEqual(types[output], core.DataType.INT32)
@given(**hu.gcs)
@settings(deadline=None, max_examples=50)
def test_size_op(self, gc, dc):
X = np.array([[1, 2], [3, 4]]).astype(np.float32)
def size_op(tensor):
return [np.prod(tensor.shape)]
op = core.CreateOperator(
"Size",
["X"],
["output"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=size_op,
)
def test_alias_op(self):
""" Don't use hypothesis because there are only 2 cases to check"""
for size in [0, 5]:
X = np.arange(size).astype(np.float32)
workspace.FeedBlob('X', X)
op = core.CreateOperator(
"Alias",
["X"],
["Y"]
)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')
np.testing.assert_array_equal(X, Y)
@given(**hu.gcs)
@settings(deadline=10000)
def test_range(self, gc, dc):
names = [
('stop_',),
('start_', 'stop_'),
('start_', 'stop_', 'step_'),
]
# Most random values aren't great here, so use a fixed set instead of
# hypothesis.
for inputs in (
(10,),
(np.float32(10.0),),
(0,),
(0, 0),
(10., 5.0, -1.),
(2, 10000),
(2, 10000, 20000),
(2, 10000, -1),
):
inputs = [np.array(v) for v in inputs]
op = core.CreateOperator(
"Range",
names[len(inputs) - 1],
["Y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=lambda *x: [np.arange(*x)],
)
self.assertDeviceChecks(dc, op, inputs, [0])
inputs = (np.array(0), np.array(10), np.array(0))
op = core.CreateOperator(
"Range",
names[len(inputs) - 1],
["Y"]
)
with self.assertRaisesRegex(RuntimeError, 'Step size cannot be 0'):
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=lambda *x: [np.arange(*x)],
)
|
pytorch-master
|
caffe2/python/operator_test/utility_ops_test.py
|
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
class TestDuplicateOperands(TestCase):
def test_duplicate_operands(self):
net = core.Net('net')
shape = (2, 4)
x_in = np.random.uniform(size=shape)
x = net.GivenTensorFill([], 'X', shape=shape,
values=x_in.flatten().tolist())
xsq = net.Mul([x, x])
y = net.DotProduct([xsq, xsq])
net.AddGradientOperators([y])
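        # y_i = sum_j (x_ij^2)^2 = sum_j x_ij^4, so the expected gradient is
        # dX = 4 * X**3, which is asserted below.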
workspace.RunNetOnce(net)
self.assertTrue(np.allclose(workspace.FetchBlob('X_grad'),
4 * x_in**3))
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/duplicate_operands_test.py
|
import functools
import operator
import string
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from caffe2.python import core, dataset, workspace
from caffe2.python.dataset import Const
from caffe2.python.schema import (
FeedRecord,
FetchRecord,
Field,
List,
Map,
NewRecord,
Scalar,
Struct,
from_blob_list,
)
from caffe2.python.test_util import TestCase
from hypothesis import given
def _assert_arrays_equal(actual, ref, err_msg):
if ref.dtype.kind in ("S", "O", "U"):
np.testing.assert_array_equal(actual, ref, err_msg=err_msg)
else:
np.testing.assert_allclose(actual, ref, atol=1e-4, rtol=1e-4, err_msg=err_msg)
def _assert_records_equal(actual, ref):
assert isinstance(actual, Field)
assert isinstance(ref, Field)
b1 = actual.field_blobs()
b2 = ref.field_blobs()
assert len(b1) == len(b2), "Records have different lengths: %d vs. %d" % (
len(b1),
len(b2),
)
for name, d1, d2 in zip(ref.field_names(), b1, b2):
_assert_arrays_equal(d1, d2, err_msg="Mismatch in field %s." % name)
@st.composite
def _sparse_features_map(draw, num_records, **kwargs):
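    # Strategy sketch: draw one map length per record, then unique int32 keys
    # across all records, and for each key a variable-length list of int64
    # values. The four returned blobs follow the lengths/keys/values:lengths/
    # values:values layout of the Map(int32 -> List(int64)) field used below.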
sparse_maps_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=num_records,
max_size=num_records,
)
)
sparse_maps_total_length = sum(sparse_maps_lengths)
sparse_keys = draw(
st.lists(
st.integers(min_value=1, max_value=100),
min_size=sparse_maps_total_length,
max_size=sparse_maps_total_length,
unique=True,
)
)
sparse_values_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=sparse_maps_total_length,
max_size=sparse_maps_total_length,
)
)
total_sparse_values_lengths = sum(sparse_values_lengths)
sparse_values = draw(
# max_value is max int64
st.lists(
st.integers(min_value=1, max_value=9223372036854775807),
min_size=total_sparse_values_lengths,
max_size=total_sparse_values_lengths,
)
)
return [
sparse_maps_lengths,
sparse_keys,
sparse_values_lengths,
sparse_values,
]
@st.composite
def _dense_features_map(draw, num_records, **kwargs):
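    # Strategy sketch: per-record lengths, globally unique int32 keys, and one
    # float value per key, matching the lengths/keys/values layout of the
    # Map(int32 -> float32) "floats" field in the schema below.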
float_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=num_records,
max_size=num_records,
)
)
total_length = sum(float_lengths)
float_keys = draw(
st.lists(
st.integers(min_value=1, max_value=100),
min_size=total_length,
max_size=total_length,
unique=True,
)
)
float_values = draw(
st.lists(st.floats(), min_size=total_length, max_size=total_length)
)
return [float_lengths, float_keys, float_values]
@st.composite
def _dataset(draw, min_elements=3, max_elements=10, **kwargs):
schema = Struct(
# Dense Features Map
("floats", Map(Scalar(np.int32), Scalar(np.float32))),
# Sparse Features Map
(
"int_lists",
Map(
Scalar(np.int32),
List(Scalar(np.int64)),
),
),
# Complex Type
("text", Scalar(str)),
)
num_records = draw(st.integers(min_value=min_elements, max_value=max_elements))
raw_dense_features_map_contents = draw(_dense_features_map(num_records))
raw_sparse_features_map_contents = draw(_sparse_features_map(num_records))
raw_text_contents = [
draw(
st.lists(
st.text(alphabet=string.ascii_lowercase),
min_size=num_records,
max_size=num_records,
)
)
]
# Concatenate all raw contents to a single one
contents_raw = (
raw_dense_features_map_contents
+ raw_sparse_features_map_contents
+ raw_text_contents
)
contents = from_blob_list(schema, contents_raw)
return (schema, contents, num_records)
class TestDatasetOps(TestCase):
@given(_dataset())
def test_pack_unpack(self, input):
"""
Tests if packing and unpacking of the whole dataset is an identity.
"""
(schema, contents, num_records) = input
dataset_fields = schema.field_names()
for pack_to_single_shared_ptr in (True, False):
net = core.Net("pack_unpack_net")
batch = NewRecord(net, contents)
FeedRecord(batch, contents)
packed = net.PackRecords(
batch.field_blobs(),
1,
fields=dataset_fields,
pack_to_single_shared_ptr=pack_to_single_shared_ptr,
)
unpacked = packed.UnPackRecords(
[], len(dataset_fields), fields=dataset_fields
)
workspace.RunNetOnce(net)
for initial_tensor, unpacked_tensor in zip(batch.field_blobs(), unpacked):
npt.assert_array_equal(
workspace.FetchBlob(initial_tensor),
workspace.FetchBlob(unpacked_tensor),
)
def test_dataset_ops(self):
"""
1. Defining the schema of our dataset.
This example schema could represent, for example, a search query log.
"""
schema = Struct(
# fixed size vector, which will be stored as a matrix when batched
("dense", Scalar((np.float32, 3))),
# could represent a feature map from feature ID to float value
("floats", Map(Scalar(np.int32), Scalar(np.float32))),
# could represent a multi-valued categorical feature map
(
"int_lists",
Map(
Scalar(np.int32),
List(Scalar(np.int64)),
),
),
# could represent a multi-valued, weighted categorical feature map
(
"id_score_pairs",
Map(
Scalar(np.int32),
Map(
Scalar(np.int64),
Scalar(np.float32),
keys_name="ids",
values_name="scores",
),
),
),
# additional scalar information
(
"metadata",
Struct(
("user_id", Scalar(np.int64)),
("user_embed", Scalar((np.float32, 2))),
("query", Scalar(str)),
),
),
)
"""
This is what the flattened fields for this schema look like, along
with its type. Each one of these fields will be stored, read and
written as a tensor.
"""
expected_fields = [
("dense", (np.float32, 3)),
("floats:lengths", np.int32),
("floats:values:keys", np.int32),
("floats:values:values", np.float32),
("int_lists:lengths", np.int32),
("int_lists:values:keys", np.int32),
("int_lists:values:values:lengths", np.int32),
("int_lists:values:values:values", np.int64),
("id_score_pairs:lengths", np.int32),
("id_score_pairs:values:keys", np.int32),
("id_score_pairs:values:values:lengths", np.int32),
("id_score_pairs:values:values:values:ids", np.int64),
("id_score_pairs:values:values:values:scores", np.float32),
("metadata:user_id", np.int64),
("metadata:user_embed", (np.float32, 2)),
("metadata:query", str),
]
zipped = zip(expected_fields, schema.field_names(), schema.field_types())
for (ref_name, ref_type), name, dtype in zipped:
            self.assertEqual(ref_name, name)
            self.assertEqual(np.dtype(ref_type), dtype)
"""
2. The contents of our dataset.
Contents as defined below could represent, for example, a log of
search queries along with dense, sparse features and metadata.
The dataset below has 3 top-level entries.
"""
contents_raw = [
# dense
[[1.1, 1.2, 1.3], [2.1, 2.2, 2.3], [3.1, 3.2, 3.3]],
# floats
[1, 2, 3], # len
[11, 21, 22, 31, 32, 33], # key
[1.1, 2.1, 2.2, 3.1, 3.2, 3.3], # value
# int lists
[2, 0, 1], # len
[11, 12, 31], # key
[2, 4, 3], # value:len
[111, 112, 121, 122, 123, 124, 311, 312, 313], # value:value
# id score pairs
[1, 2, 2], # len
[11, 21, 22, 31, 32], # key
[1, 1, 2, 2, 3], # value:len
[111, 211, 221, 222, 311, 312, 321, 322, 323], # value:ids
[11.1, 21.1, 22.1, 22.2, 31.1, 31.2, 32.1, 32.2, 32.3], # val:score
# metadata
[123, 234, 456], # user_id
[[0.2, 0.8], [0.5, 0.5], [0.7, 0.3]], # user_embed
["dog posts", "friends who like to", "posts about ca"], # query
]
# convert the above content to ndarrays, checking against the schema
contents = from_blob_list(schema, contents_raw)
"""
3. Creating and appending to the dataset.
We first create an empty dataset with the given schema.
Then, a Writer is used to append these entries to the dataset.
"""
ds = dataset.Dataset(schema)
net = core.Net("init")
with core.NameScope("init"):
ds.init_empty(net)
content_blobs = NewRecord(net, contents)
FeedRecord(content_blobs, contents)
writer = ds.writer(init_net=net)
writer.write_record(net, content_blobs)
workspace.RunNetOnce(net)
"""
4. Iterating through the dataset contents.
If we were to iterate through the top level entries of our dataset,
this is what we should expect to see:
"""
entries_raw = [
(
[[1.1, 1.2, 1.3]], # dense
[1],
[11],
[1.1], # floats
[2],
[11, 12],
[2, 4],
[111, 112, 121, 122, 123, 124], # intlst
[1],
[11],
[1],
[111],
[11.1], # id score pairs
[123],
[[0.2, 0.8]],
["dog posts"], # metadata
),
(
[[2.1, 2.2, 2.3]], # dense
[2],
[21, 22],
[2.1, 2.2], # floats
[0],
[],
[],
[], # int list
[2],
[21, 22],
[1, 2],
[211, 221, 222],
[21.1, 22.1, 22.2],
[234],
[[0.5, 0.5]],
["friends who like to"], # metadata
),
(
[[3.1, 3.2, 3.3]], # dense
[3],
[31, 32, 33],
[3.1, 3.2, 3.3], # floats
[1],
[31],
[3],
[311, 312, 313], # int lst
[2],
[31, 32],
[2, 3],
[311, 312, 321, 322, 323],
[31.1, 31.2, 32.1, 32.2, 32.3], # id score list
[456],
[[0.7, 0.3]],
["posts about ca"], # metadata
),
# after the end of the dataset, we will keep getting empty vectors
([],) * 16,
([],) * 16,
]
entries = [from_blob_list(schema, e) for e in entries_raw]
"""
Let's go ahead and create the reading nets.
We will run `read` net multiple times and assert that we are reading the
entries the way we stated above.
"""
read_init_net = core.Net("read_init")
read_next_net = core.Net("read_next")
reader = ds.reader(read_init_net)
should_continue, batch = reader.read_record(read_next_net)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for entry in entries:
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
"""
5. Reading/writing in a single plan
If all of operations on the data are expressible as Caffe2 operators,
we don't need to load the data to python, iterating through the dataset
in a single Plan.
Where we will process the dataset a little and store it in a second
dataset. We can reuse the same Reader since it supports reset.
"""
reset_net = core.Net("reset_net")
reader.reset(reset_net)
read_step, batch = reader.execution_step()
""" We will add the line number * 1000 to the feature ids. """
process_net = core.Net("process")
line_no = Const(process_net, 0, dtype=np.int32)
const_one = Const(process_net, 1000, dtype=np.int32)
process_net.Add([line_no, const_one], [line_no])
field = batch.floats.keys.get()
process_net.Print(field, [])
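        # line_no grows by 1000 on every execution of process_net, and the
        # broadcast Add below adds it to every feature id read in that batch,
        # so the k-th entry's keys end up shifted by k * 1000 (undone by the
        # subtraction in the check further down).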
process_net.Add([field, line_no], field, broadcast=1, axis=0)
""" Lets create a second dataset and append to it. """
ds2 = dataset.Dataset(schema, name="dataset2")
ds2.init_empty(reset_net)
writer = ds2.writer(reset_net)
writer.write_record(process_net, batch)
        # commit is not necessary for DatasetWriter, but we add it here for
        # generality of the example
commit_net = core.Net("commit")
writer.commit(commit_net)
""" Time to create and run a plan which will do the processing """
plan = core.Plan("process")
plan.AddStep(core.execution_step("reset", reset_net))
plan.AddStep(read_step.AddNet(process_net))
plan.AddStep(core.execution_step("commit", commit_net))
workspace.RunPlan(plan)
"""
Now we should have dataset2 populated.
"""
ds2_data = FetchRecord(ds2.content())
field = ds2_data.floats.keys
field.set(blob=field.get() - [1000, 2000, 2000, 3000, 3000, 3000])
_assert_records_equal(contents, ds2_data)
"""
6. Slicing a dataset
You can create a new schema from pieces of another schema and reuse
the same data.
"""
subschema = Struct(("top_level", schema.int_lists.values))
int_list_contents = contents.int_lists.values.field_names()
        self.assertEqual(len(subschema.field_names()), len(int_list_contents))
"""
7. Random Access a dataset
"""
read_init_net = core.Net("read_init")
read_next_net = core.Net("read_next")
idx = np.array([2, 1, 0])
indices_blob = Const(read_init_net, idx, name="indices")
reader = ds.random_reader(read_init_net, indices_blob)
reader.computeoffset(read_init_net)
should_stop, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for i in range(len(entries)):
k = idx[i] if i in idx else i
entry = entries[k]
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
workspace.RunNet(str(read_next_net))
        self.assertEqual(True, workspace.FetchBlob(should_stop))
"""
8. Random Access a dataset with loop_over = true
"""
read_init_net = core.Net("read_init")
read_next_net = core.Net("read_next")
idx = np.array([2, 1, 0])
indices_blob = Const(read_init_net, idx, name="indices")
reader = ds.random_reader(read_init_net, indices_blob, loop_over=True)
reader.computeoffset(read_init_net)
should_stop, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for _ in range(len(entries) * 3):
workspace.RunNet(str(read_next_net))
            self.assertEqual(False, workspace.FetchBlob(should_stop))
"""
9. Sort and shuffle a dataset
This sort the dataset using the score of a certain column,
and then shuffle within each chunk of size batch_size * shuffle_size
before shuffling the chunks.
"""
read_init_net = core.Net("read_init")
read_next_net = core.Net("read_next")
reader = ds.random_reader(read_init_net)
reader.sort_and_shuffle(read_init_net, "int_lists:lengths", 1, 2)
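        # Sorting is by the int_lists:lengths column; the two integer
        # arguments control the shuffle chunking, giving a chunk of
        # 1 * 2 = 2 records here (cf. the docstring above).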
reader.computeoffset(read_init_net)
should_continue, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
expected_idx = np.array([2, 1, 0])
for i in range(len(entries)):
k = expected_idx[i] if i in expected_idx else i
entry = entries[k]
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
"""
Trim a dataset
"""
trim_net = core.Net("trim_ds")
ds.trim(trim_net, multiple_of=2)
workspace.RunNetOnce(trim_net)
trimmed = FetchRecord(ds.content())
EXPECTED_SIZES = [2, 2, 3, 3, 2, 2, 2, 6, 2, 3, 3, 4, 4, 2, 2, 2]
actual_sizes = [d.shape[0] for d in trimmed.field_blobs()]
        self.assertEqual(EXPECTED_SIZES, actual_sizes)
def test_last_n_window_ops(self):
collect_net = core.Net("collect_net")
collect_net.GivenTensorFill(
[],
"input",
shape=[3, 2],
values=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
)
input_array = np.array(list(range(1, 7)), dtype=np.float32).reshape(3, 2)
workspace.CreateBlob("output")
workspace.FeedBlob("next", np.array(0, dtype=np.int32))
collect_net.LastNWindowCollector(
["output", "next", "input"],
["output", "next"],
num_to_collect=7,
)
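        # LastNWindowCollector keeps the most recent num_to_collect rows in a
        # circular buffer; once more rows have been seen than fit (capacity 7
        # here), the stored order reflects the buffer's write cursor rather
        # than chronological order, which explains the index patterns below.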
plan = core.Plan("collect_data")
plan.AddStep(core.execution_step("collect_data", [collect_net], num_iter=1))
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob("output")
npt.assert_array_equal(input_array, reference_result)
plan = core.Plan("collect_data")
plan.AddStep(core.execution_step("collect_data", [collect_net], num_iter=2))
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob("output")
npt.assert_array_equal(input_array[[1, 2, 2, 0, 1, 2, 0]], reference_result)
plan = core.Plan("collect_data")
plan.AddStep(core.execution_step("collect_data", [collect_net], num_iter=3))
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob("output")
npt.assert_array_equal(input_array[[2, 0, 1, 2, 2, 0, 1]], reference_result)
def test_last_n_window_ops_shape_inference(self):
collect_net = core.Net("collect_net")
collect_net.GivenTensorFill(
[],
"input",
shape=[3, 2],
values=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
)
workspace.CreateBlob("output")
workspace.FeedBlob("next", np.array(0, dtype=np.int32))
collect_net.LastNWindowCollector(
["output", "next", "input"],
["output", "next"],
num_to_collect=7,
)
(shapes, types) = workspace.InferShapesAndTypes([collect_net])
workspace.RunNetOnce(collect_net)
self.assertTrue(
np.array_equal(
shapes["output"], np.array([7, workspace.blobs["output"].shape[1]])
)
)
def test_last_n_window_ops_shape_inference_4d_input(self):
input_shape = [3, 2, 4, 5]
collect_net = core.Net("collect_net")
collect_net.GivenTensorFill(
[],
"input",
shape=input_shape,
values=[
float(val) for val in range(functools.reduce(operator.mul, input_shape))
],
)
workspace.CreateBlob("output")
workspace.FeedBlob("next", np.array(0, dtype=np.int32))
collect_net.LastNWindowCollector(
["output", "next", "input"],
["output", "next"],
num_to_collect=7,
)
(shapes, types) = workspace.InferShapesAndTypes([collect_net])
workspace.RunNetOnce(collect_net)
self.assertTrue(
np.array_equal(
shapes["output"], np.array([7, *list(workspace.blobs["output"].shape[1:])])
)
)
def test_collect_tensor_ops(self):
init_net = core.Net("init_net")
blobs = ["blob_1", "blob_2", "blob_3"]
bvec_map = {}
ONE = init_net.ConstantFill([], "ONE", shape=[1, 2], value=1)
for b in blobs:
init_net.ConstantFill([], [b], shape=[1, 2], value=0)
bvec_map[b] = b + "_vec"
init_net.CreateTensorVector([], [bvec_map[b]])
reader_net = core.Net("reader_net")
for b in blobs:
reader_net.Add([b, ONE], [b])
collect_net = core.Net("collect_net")
num_to_collect = 1000
max_example_to_cover = 100000
bvec = [bvec_map[b] for b in blobs]
collect_net.CollectTensor(
bvec + blobs,
bvec,
num_to_collect=num_to_collect,
)
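        # CollectTensor keeps a uniform random sample of num_to_collect rows
        # over all iterations (reservoir-style); the histogram check further
        # down verifies this uniformity loosely.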
print("Collect Net Proto: {}".format(collect_net.Proto()))
plan = core.Plan("collect_data")
plan.AddStep(core.execution_step("collect_init", init_net))
plan.AddStep(
core.execution_step(
"collect_data", [reader_net, collect_net], num_iter=max_example_to_cover
)
)
workspace.RunPlan(plan)
# concat the collected tensors
concat_net = core.Net("concat_net")
bconcated_map = {}
bsize_map = {}
for b in blobs:
bconcated_map[b] = b + "_concated"
bsize_map[b] = b + "_size"
concat_net.ConcatTensorVector([bvec_map[b]], [bconcated_map[b]])
concat_net.TensorVectorSize([bvec_map[b]], [bsize_map[b]])
workspace.RunNetOnce(concat_net)
# check data
reference_result = workspace.FetchBlob(bconcated_map[blobs[0]])
self.assertEqual(
reference_result.shape, (min(num_to_collect, max_example_to_cover), 2)
)
size = workspace.FetchBlob(bsize_map[blobs[0]])
self.assertEqual(tuple(), size.shape)
self.assertEqual(min(num_to_collect, max_example_to_cover), size.item())
hist, _ = np.histogram(
reference_result[:, 0], bins=10, range=(1, max_example_to_cover)
)
print("Sample histogram: {}".format(hist))
self.assertTrue(all(hist > 0.6 * (num_to_collect / 10)))
for i in range(1, len(blobs)):
result = workspace.FetchBlob(bconcated_map[blobs[i]])
self.assertEqual(reference_result.tolist(), result.tolist())
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/dataset_ops_test.py
|
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
class TestNGramOps(hu.HypothesisTestCase):
@given(
seed=st.integers(0, 2**32 - 1),
N=st.integers(min_value=10, max_value=100),
D=st.integers(min_value=2, max_value=10),
out_of_vcb=st.floats(min_value=0, max_value=0.5),
max_categorical_limit=st.integers(min_value=5, max_value=20),
max_in_vcb_val=st.integers(min_value=1000, max_value=10000),
**hu.gcs_cpu_only
)
def test_ngram_from_categorical_op(
self,
seed,
N,
D,
out_of_vcb,
max_categorical_limit,
max_in_vcb_val,
gc,
dc,
):
np.random.seed(seed)
col_num = max(int(D / 2), 1)
col_ids = np.random.choice(D, col_num, False).astype(np.int32)
categorical_limits = np.random.randint(
2, high=max_categorical_limit, size=col_num
).astype(np.int32)
vcb = [
np.random.choice(max_in_vcb_val, x, False)
for x in categorical_limits
]
vals = np.array([x for l in vcb for x in l], dtype=np.int32)
# Enforce round(floats) to be negative.
floats = np.random.rand(N, D).astype(np.float32) - 2
expected_output = []
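        # Expected n-gram id: each selected column contributes its category
        # index in a mixed-radix encoding whose radix for column k is the
        # product of the preceding columns' categorical limits; out-of-vocab
        # entries contribute nothing for that column.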
for i in range(N):
val = 0
for (k, j) in enumerate(col_ids):
base = np.prod(categorical_limits[:k])
r = np.random.randint(categorical_limits[k])
p = np.random.rand()
if p > out_of_vcb:
val += base * r
floats[i][j] = vcb[k][r]
expected_output.append(val)
expected_output = np.array(expected_output, dtype=np.int32)
workspace.ResetWorkspace()
workspace.FeedBlob('floats', floats)
op = core.CreateOperator(
"NGramFromCategorical",
['floats'],
['output'],
col_ids=col_ids,
categorical_limits=categorical_limits,
vals=vals,
)
workspace.RunOperatorOnce(op)
output = workspace.blobs['output']
np.testing.assert_array_equal(output, expected_output)
|
pytorch-master
|
caffe2/python/operator_test/ngram_ops_test.py
|
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util, model_helper, brew, build
@unittest.skipIf(build.CAFFE2_NO_OPERATOR_SCHEMA,
'Built with CAFFE2_NO_OPERATOR_SCHEMA')
class TestShapeInference(test_util.TestCase):
def testShapeInferenceSimpleFC(self):
m = model_helper.ModelHelper(name="test_model")
brew.fc(m, "data", "fc1", dim_in=96, dim_out=32)
brew.fc(m, "fc1", "fc2", dim_in=32, dim_out=55)
for b in [0, 64]:
(shapes, types) = workspace.InferShapesAndTypes(
[m.param_init_net, m.net],
{'data': [b, 96]}
)
            self.assertEqual(shapes['data'], [b, 96])
            self.assertEqual(shapes['fc1_w'], [32, 96])
            self.assertEqual(shapes['fc1_b'], [32])
            self.assertEqual(shapes['fc1'], [b, 32])
            self.assertEqual(shapes['fc2_w'], [55, 32])
            self.assertEqual(shapes['fc2_b'], [55])
            self.assertEqual(shapes['fc2'], [b, 55])
def testFCAxis2(self):
model = model_helper.ModelHelper(name="test_model")
model.net.FC(["x", "w", "b"], ["y"], axis=2)
workspace.FeedBlob("x", np.random.rand(4, 20, 36).astype(np.float32))
workspace.FeedBlob("w", np.random.rand(36, 36).astype(np.float32))
workspace.FeedBlob("b", np.random.rand(36,).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testFCTransposed(self):
model = model_helper.ModelHelper(name="test_model")
model.net.FCTransposed(["x", "wt", "b"], ["y"])
workspace.FeedBlob("x", np.random.rand(20, 36).astype(np.float32))
workspace.FeedBlob("wt", np.random.rand(36, 48).astype(np.float32))
workspace.FeedBlob("b", np.random.rand(48,).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceSlice(self):
model = model_helper.ModelHelper(name="test_model")
model.net.Slice(["x"], ["y"], starts=[0, 0, 0, 0], ends=[-1, -1, -3, -1])
workspace.FeedBlob("x", np.random.rand(64, 1, 255, 384).astype(np.float32))
slice_starts = np.array([0, 0, 0, 0]).astype(np.int32)
slice_ends = np.array([-1, -1, -3, -1]).astype(np.int32)
slice_starts = model.net.GivenTensorIntFill(
[], shape=[4], values=slice_starts)
slice_ends = model.net.GivenTensorIntFill(
[], shape=[4], values=slice_ends)
model.net.Slice(["x2", slice_starts, slice_ends], ["y2"])
workspace.FeedBlob("x2", np.random.rand(64, 1, 255, 384).astype(np.float32))
self.InferTensorRunAndCompare(model, ["y2"])
def testShapeInferenceDistances(self):
model = model_helper.ModelHelper(name="test_model")
model.net.L1Distance(["x1", "y1"], "dl1_D1")
model.net.SquaredL2Distance(["x1", "y1"], "dl2_D1")
model.net.CosineSimilarity(["x1", "y1"], "dcos_D1")
model.net.DotProduct(["x1", "y1"], "ddot_D1")
model.net.DotProductWithPadding(["x1", "y1"], "ddotpad_D1")
model.net.L1Distance(["x2", "y2"], "dl1_D2")
model.net.SquaredL2Distance(["x2", "y2"], "dl2_D2")
model.net.CosineSimilarity(["x2", "y2"], "dcos_D2")
model.net.DotProduct(["x2", "y2"], "ddot_D2")
model.net.DotProductWithPadding(["x2", "z2"], "ddotpad_D2")
workspace.FeedBlob("x1", np.random.rand(10).astype(np.float32))
workspace.FeedBlob("y1", np.random.rand(10).astype(np.float32))
workspace.FeedBlob("x2", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("y2", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("z2", np.random.rand(10, 4).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceReduceBackFrontX(self):
model = model_helper.ModelHelper(name="test_model")
model.net.ReduceBackSum(["x"], ["x_back_sum"])
model.net.ReduceBackMean(["x"], ["x_back_mean"])
model.net.ReduceBackMax(["x"], ["x_back_max"])
model.net.ReduceFrontSum(["x"], ["x_front_sum"])
model.net.ReduceFrontMean(["x"], ["x_front_mean"])
model.net.ReduceFrontMax(["x"], ["x_front_max"])
workspace.FeedBlob("x", np.random.rand(10, 12, 18).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testGather(self):
model = model_helper.ModelHelper(name="test_model")
model.net.Gather(["X", "idx"], "Y")
workspace.FeedBlob("X", np.random.rand(100, 4, 5).astype(np.float32))
workspace.FeedBlob("idx", np.array([[3, 18], [99, 4], [2, 5]]).astype(np.int32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceConvNet(self):
model = model_helper.ModelHelper(name="convtest")
model.NHWC2NCHW("data", "data_nchw")
brew.conv(model, "data_nchw", 'conv1', 3, 64,
weight_init=("MSRAFill", {}), kernel=7,
stride=2, pad=3, no_bias=0)
brew.spatial_bn(model, 'conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3, is_test=False)
brew.relu(model, 'conv1_spatbn_relu', 'conv1_spatbn_relu')
brew.max_pool(model, 'conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
brew.fc(model, 'pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)
brew.dropout(model, 'fc', 'fc_drop', is_test=False)
model.Sigmoid('fc_drop', 'fc_sigm')
brew.softmax(model, 'fc_sigm', 'softmax')
model.LabelCrossEntropy(['softmax', 'label'], 'xent')
loss = model.AveragedLoss('xent', 'loss')
model.AddGradientOperators([loss])
LR = model.param_init_net.ConstantFill(
[], 'LR', shape=[1], value=0.1
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, LR, param],
[param_grad, param_momentum, param],
)
workspace.FeedBlob(
"data",
np.random.rand(16, 227, 227, 3).astype(np.float32),
)
workspace.FeedBlob(
"label",
(100 * np.random.rand(16)).astype(np.int32),
)
        # Then do the automatic comparison test: run the net once to
        # initialize everything
self.InferTensorRunAndCompare(model)
def testShapeInferenceTranspose(self):
model = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"tensor",
np.random.rand(4, 2, 3, 3, 5).astype(np.float32)
)
# Testing with axes undefined
brew.transpose(
model,
["tensor"],
"transpose",
)
self.InferTensorRunAndCompare(model)
# Testing with axes defined
brew.transpose(
model,
["tensor"],
"transpose",
axes=np.random.permutation(5)
)
return self.InferTensorRunAndCompare(model)
def testShapeInferencePad(self):
model = model_helper.ModelHelper(name="padtest")
model.PadImage("data", 'padded', pad_t=100, pad_l=37, pad_b=28,
pad_r=20, mode="constant", order="NCHW")
workspace.FeedBlob(
"data",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceTwoClass(self):
model = model_helper.ModelHelper(name="twoclass")
model.MakeTwoClass("v", "v2")
workspace.FeedBlob("v", np.random.rand(32).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferencePadZero(self):
model = model_helper.ModelHelper(name="padtest")
model.PadImage("data", 'padded', pad=0, mode="constant",
order="NCHW")
workspace.FeedBlob(
"data",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceMatMul(self):
model = model_helper.ModelHelper(name="test_model")
model.MatMul(["x", "y"], "MatMul")
workspace.FeedBlob("x", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("y", np.random.rand(5, 10).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceSoftmaxWithLoss(self):
model = model_helper.ModelHelper(name="test_model")
model.SoftmaxWithLoss(
["logits", "labels"],
["softmax", "loss"],
)
# 2D Shape of [batch_size, num_classes]
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
# Shape of size batch_size with all values [0, num_classes)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=(4, 1)).astype(np.int32),
)
self.InferTensorRunAndCompare(model)
# Testing with 1D labels arg
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=4).astype(np.int32),
)
self.InferTensorRunAndCompare(model)
# Testing with weight_tensor
model.SoftmaxWithLoss(
["logits", "labels", "weight_tensor"],
["softmax", "loss"],
)
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=4).astype(np.int32),
)
workspace.FeedBlob(
"weight_tensor",
np.random.rand(4).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test spatial model
model = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"img",
np.random.rand(32, 19, 33, 28).astype(np.float32)
)
workspace.FeedBlob(
"img_labels",
(np.random.rand(32, 33, 28) * 19).astype(np.int32)
)
model.SpatialSoftmaxWithLoss(
["img", "img_labels"],
["softmax_img", "loss"],
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceIm2Col(self):
# Test with NCHW
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel=4, dilation=2, stride=2,
order="NCHW")
workspace.FeedBlob(
"X",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test with NHWC
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel=4, dilation=2, stride=2,
order="NHWC")
workspace.FeedBlob(
"X",
np.random.rand(16, 228, 228, 3).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test with different width and height
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel_h=8, kernel_w=4,
dilation=2, stride=2)
workspace.FeedBlob(
"X",
np.random.rand(16, 3, 228, 114).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceTile(self):
m = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"tensor",
np.random.rand(4, 2, 3, 3, 5).astype(np.float32)
)
        # Test tiling along each of the first four axes
for i in range(0, 4):
m.net.Tile(
"tensor", "tiled_tensor_{}".format(i), tiles=5, axis=i)
self.InferTensorRunAndCompare(m)
def testShapeInferenceFlatten(self):
model = model_helper.ModelHelper(name="test_model")
model.FlattenToVec("X", "FlatVec")
model.FlattenToVec("empty", "EmptyFlatVec")
workspace.FeedBlob("X", np.random.rand(17, 5, 13).astype(np.float32))
workspace.FeedBlob("empty", np.random.rand(0, 2, 3).astype(np.float32))
self.InferTensorRunAndCompare(model)
# test Flatten with default axis (=1)
model = model_helper.ModelHelper(name="test_model")
model.Flatten("X", "Flat")
model.Flatten("empty", "EmptyFlat")
workspace.FeedBlob("X", np.random.rand(17, 5, 13).astype(np.float32))
workspace.FeedBlob("empty", np.random.rand(0, 2, 3).astype(np.float32))
self.InferTensorRunAndCompare(model)
# test Flatten with axis
model = model_helper.ModelHelper(name="test_model")
x = np.random.randn(17, 5, 13)
for axis in range(x.ndim + 1):
model.Flatten("x", "Flat", axis=axis)
workspace.FeedBlob("x", x)
self.InferTensorRunAndCompare(model)
empty = np.random.randn(0, 5, 13)
for axis in range(empty.ndim + 1):
model.Flatten("empty", "Flat", axis=axis)
workspace.FeedBlob("empty", empty)
self.InferTensorRunAndCompare(model)
def testShapeInferenceReshape(self):
model = model_helper.ModelHelper(name="test_model")
model.Reshape("X", ["Reshaped", "Old_Shape"], shape=[8, 0, -1, 2])
workspace.FeedBlob("X", np.random.rand(4, 26, 32).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceUnique(self):
for n in [0, 1]:
model = model_helper.ModelHelper(name="test_model")
model.Unique("X", ["Y"])
model.Unique("X", ["Z", "remap"])
workspace.FeedBlob("X", np.random.rand(n).astype(np.int64))
self.InferTensorRunAndCompare(model)
def testLengthsSum(self):
model = model_helper.ModelHelper(name="test_model")
model.LengthsSum(["X", "length"], ["sum"])
workspace.FeedBlob("X", np.random.rand(6, 32).astype(np.float32))
workspace.FeedBlob("length", np.array([1, 2, 3], dtype=np.int32))
self.InferTensorRunAndCompare(model)
def testLengthsPad(self):
model = model_helper.ModelHelper(name="test_model")
model.LengthsPad(
["X", "length"],
["X_padded"],
target_length=10,
padding_value=-1.0,
)
workspace.FeedBlob("X", np.random.rand(6, 32).astype(np.float32))
workspace.FeedBlob("length", np.array([1, 2, 3], dtype=np.int32))
self.InferTensorRunAndCompare(model)
def testConcat(self):
net = core.Net("concat")
net.Concat(["A", "B"], ["C", "splits"], axis=1)
net.Concat(["C", "D"], ["E", "splitsE"], order="NCHW")
net.Concat(["E", "F"], ["G", "splitsG"], add_axis=1, order="NHWC")
(shapes, types) = workspace.InferShapesAndTypes(
[net],
{
'A': [10, 12, 9, 10],
'B': [10, 9, 9, 10],
'D': [10, 2, 9, 10],
'F': [10, 23, 9, 10]
}
)
self.assertEqual(shapes['C'], [10, 21, 9, 10])
self.assertEqual(shapes['splits'], [2])
self.assertEqual(shapes['E'], [10, 23, 9, 10])
self.assertEqual(shapes['G'], [10, 23, 9, 2, 10])
def testConcatInt32(self):
net = core.Net("concat")
net.Concat(["A", "B"], ["C", "splits"], axis=1)
net.Concat(["C", "D"], ["E", "splitsE"], order="NCHW")
net.Concat(["E", "F"], ["G", "splitsG"], add_axis=1, order="NHWC")
(shapes, types) = workspace.InferShapesAndTypes(
[net],
blob_dimensions={
'A': [10, 12, 9, 10],
'B': [10, 9, 9, 10],
'D': [10, 2, 9, 10],
'F': [10, 23, 9, 10]
},
blob_types={
'A': core.DataType.INT32,
'B': core.DataType.INT32,
'D': core.DataType.INT32,
'F': core.DataType.INT32,
}
)
self.assertEqual(shapes['C'], [10, 21, 9, 10])
self.assertEqual(shapes['splits'], [2])
self.assertEqual(shapes['E'], [10, 23, 9, 10])
self.assertEqual(shapes['G'], [10, 23, 9, 2, 10])
self.assertEqual(types['C'], core.DataType.INT32)
self.assertEqual(types['splits'], core.DataType.INT32)
self.assertEqual(types['E'], core.DataType.INT32)
self.assertEqual(types['G'], core.DataType.INT32)
def testSqueeze(self):
net = core.Net("sq")
net.Squeeze(["data"], ["data_squeezed"], dims=[3, 1])
(shapes, types) = workspace.InferShapesAndTypes(
[net],
{'data': [64, 1, 96, 1, 4]}
)
self.assertEqual(shapes['data_squeezed'], [64, 96, 4])
def testCast(self):
model = model_helper.ModelHelper(name="test_model")
types = [
            ('bool', np.bool_, caffe2_pb2.TensorProto.BOOL),
#('byte', None, caffe2_pb2.TensorProto.BYTE),
('int8', np.int8, caffe2_pb2.TensorProto.INT8),
('uint8', np.uint8, caffe2_pb2.TensorProto.UINT8),
('int16', np.int16, caffe2_pb2.TensorProto.INT16),
('uint16', np.uint16, caffe2_pb2.TensorProto.UINT16),
#('float16', np.float16, caffe2_pb2.TensorProto.FLOAT16),
('int32', np.int32, caffe2_pb2.TensorProto.INT32),
('float', np.float32, caffe2_pb2.TensorProto.FLOAT),
('int64', np.int64, caffe2_pb2.TensorProto.INT64),
('double', np.float64, caffe2_pb2.TensorProto.DOUBLE),
#('string', None, caffe2_pb2.TensorProto.STRING),
]
for (xstr, xnp, _) in types:
xname = 'X%s' % xstr
workspace.FeedBlob(xname, np.random.rand(1).astype(xnp))
for (ystr, _, yc2) in types:
yname = 'Y%s_to_%s' % (xstr, ystr)
model.Cast(xname, yname, to=yc2)
self.InferTensorRunAndCompare(model)
def testShapeInferenceRoiPool(self):
for is_test in [True, False]:
model = model_helper.ModelHelper(name="test_model")
outputs = ['Y'] if is_test else ['Y', 'argmaxes']
model.net.RoIPool(
['X', 'R'], outputs, pooled_h=4, pooled_w=5, is_test=is_test)
workspace.FeedBlob(
"X",
np.random.rand(100, 3, 4, 5).astype(np.float32))
workspace.FeedBlob(
"R",
np.random.rand(2, 5).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferencePow(self):
model = model_helper.ModelHelper(name="powtest")
model.Pow("x", 'y', exponent=-1.0)
workspace.FeedBlob('x', np.random.rand(1, 2, 3, 4).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testInt8Conversion(self):
model = model_helper.ModelHelper(name="fp32_int8_conversion_test")
model.FloatToFused8BitRowwiseQuantized('x', 'x_8bit')
model.Fused8BitRowwiseQuantizedToFloat('x_8bit', 'x_recovered')
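        # Fused 8-bit rowwise quantization stores uint8 codes plus a per-row
        # scale and bias, so the round trip is lossy; hence the loose atol in
        # the allclose check below.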
workspace.FeedBlob('x', np.random.rand(100, 150).astype(np.float32))
self.InferTensorRunAndCompare(model)
x = workspace.FetchBlob('x')
x_recovered = workspace.FetchBlob('x_recovered')
# TODO: find a tighter bound
assert(np.allclose(x, x_recovered, atol=1e-2))
model = model_helper.ModelHelper(name="fp32_int8_conversion_test")
model.FloatToFused8BitRowwiseQuantizedHalfScaleBias('x', 'x_8bit')
model.Fused8BitRowwiseQuantizedHalfScaleBiasToFloat('x_8bit', 'x_recovered')
workspace.FeedBlob('x', np.random.rand(100, 150).astype(np.float32))
self.InferTensorRunAndCompare(model)
x = workspace.FetchBlob('x')
x_recovered = workspace.FetchBlob('x_recovered')
# TODO: find a tighter bound
assert(np.allclose(x, x_recovered, atol=1e-2))
def testHalfInt8Conversion(self):
model = model_helper.ModelHelper(name="fp16_int8_conversion_test")
model.HalfFloatToFused8BitRowwiseQuantized('x', 'x_8bit')
model.Fused8BitRowwiseQuantizedToHalfFloat('x_8bit', 'x_recovered')
workspace.FeedBlob('x', np.random.rand(100, 150).astype(np.float16))
self.InferTensorRunAndCompare(model)
x = workspace.FetchBlob('x')
x_recovered = workspace.FetchBlob('x_recovered')
# TODO: find a tighter bound
assert(np.allclose(x, x_recovered, atol=1e-2))
model = model_helper.ModelHelper(name="fp16_int8_conversion_test")
model.HalfFloatToFused8BitRowwiseQuantizedHalfScaleBias('x', 'x_8bit')
model.Fused8BitRowwiseQuantizedHalfScaleBiasToHalfFloat('x_8bit', 'x_recovered')
workspace.FeedBlob('x', np.random.rand(100, 150).astype(np.float16))
self.InferTensorRunAndCompare(model)
x = workspace.FetchBlob('x')
x_recovered = workspace.FetchBlob('x_recovered')
# TODO: find a tighter bound
assert(np.allclose(x, x_recovered, atol=1e-2))
def testLearningRateOp(self):
net = core.Net("lr_test")
iteration = net.ConstantFill(
[],
"iteration",
shape=[1],
value=0,
dtype=core.DataType.INT64,
)
lr = net.LearningRate(
[iteration],
net.NextScopedBlob("weight_decay"),
base_lr=0.5,
policy="constantWarmup",
multiplier=0.0,
num_iter=0,
)
(shapes, types) = workspace.InferShapesAndTypes(
[net],
)
self.assertEqual(shapes['weight_decay'], [1])
def testShapeOp(self):
model = model_helper.ModelHelper(name="shape_op_test")
model.Shape('x', 'y')
workspace.FeedBlob('x', np.random.rand(100, 150).astype(np.float32))
self.InferTensorRunAndCompare(model)
def InferTensorRunAndCompare(self, model, expected_uninferred_blobs=None):
        '''
        Runs shape inference, and then the model, to check that the
        inferred shapes agree with the actual ones.
        'expected_uninferred_blobs' is the list of blobs for which type and
        shape cannot be inferred.
        '''
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
)
# .. Create net
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
workspace.RunNet(model.Proto().name)
        # ... and then check that the inferred shapes match the actual ones
correct_shapes = {}
correct_types = {}
for b in workspace.Blobs():
arr = workspace.FetchBlob(b)
correct_shapes[b] = arr.shape
if type(arr) is np.ndarray:
if arr.dtype == np.dtype('float32'):
correct_types[b] = caffe2_pb2.TensorProto.FLOAT
elif arr.dtype == np.dtype('int32'):
correct_types[b] = caffe2_pb2.TensorProto.INT32
# BYTE
# STRING
elif arr.dtype == np.dtype('bool'):
correct_types[b] = caffe2_pb2.TensorProto.BOOL
elif arr.dtype == np.dtype('uint8'):
correct_types[b] = caffe2_pb2.TensorProto.UINT8
elif arr.dtype == np.dtype('int8'):
correct_types[b] = caffe2_pb2.TensorProto.INT8
elif arr.dtype == np.dtype('uint16'):
correct_types[b] = caffe2_pb2.TensorProto.UINT16
elif arr.dtype == np.dtype('int16'):
correct_types[b] = caffe2_pb2.TensorProto.INT16
elif arr.dtype == np.dtype('int64'):
correct_types[b] = caffe2_pb2.TensorProto.INT64
elif arr.dtype == np.dtype('float16'):
correct_types[b] = caffe2_pb2.TensorProto.FLOAT16
elif arr.dtype == np.dtype('float64'):
correct_types[b] = caffe2_pb2.TensorProto.DOUBLE
else:
correct_types[b] = "unknown {}".format(arr.dtype)
else:
correct_types[b] = str(type(arr))
if expected_uninferred_blobs is None:
expected_uninferred_blobs = []
for b in correct_shapes:
# skip blobs for which shape couldn't be inferred
if b in expected_uninferred_blobs:
continue
self.assertTrue(
np.array_equal(
np.array(shapes[b]).astype(np.int32),
np.array(correct_shapes[b]).astype(np.int32)
),
"Shape {} mismatch: {} vs. correct {}".format(
b, shapes[b], correct_shapes[b]
)
)
self.assertFalse(
b not in types and b in correct_types,
"Type for {} not defined".format(b),
)
self.assertEqual(
types[b],
correct_types[b],
"Type {} mismatch: {} vs. {}".format(
b, types[b], correct_types[b],
)
)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/shape_inference_test.py
|
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given
class TestMarginLossL2rOps(hu.HypothesisTestCase):
def ref_margin_loss(self, y, r, margin):
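        # Reference pairwise margin (hinge) loss: every pair (i, j) with
        # different labels contributes max(0, margin - (y_i - y_j) * sign(r_i - r_j)),
        # scaled by 1/n, with the matching subgradient accumulated into dy.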
n = len(y)
dy = np.zeros(n)
loss = 0
if np.sum(np.abs(r)) < 1e-6:
return loss, dy
for i in range(n):
for j in range(i + 1, n):
weight = 1.0 / n
diff = 1 if r[i] - r[j] > 0 else -1
if (margin > (y[i] - y[j]) * diff) and (r[i] != r[j]):
loss += weight * (margin - (y[i] - y[j]) * diff)
dy[i] += -diff * weight
dy[j] += diff * weight
return loss, dy
@given(
n=st.integers(10, 10),
k=st.integers(2, 5),
m=st.integers(1, 5),
**hu.gcs_cpu_only
)
def test_session_margin_loss(self, n, k, m, gc, dc):
y = np.random.rand(n * m).astype(np.float32)
r = np.random.randint(k, size=n * m).astype(np.float32)
# m sessions of length n
session_lengths = np.repeat(n, m).astype(np.int32)
ref_loss = np.empty(0)
ref_scale_loss = np.empty(0)
ref_dy = np.empty(0)
ref_scale_dy = np.empty(0)
for i in range(m):
r_loss, r_dy = self.ref_margin_loss(
y[(i) * n : (i + 1) * n], r[(i) * n : (i + 1) * n], 0.06
)
r_scale_loss, r_scale_dy = self.ref_margin_loss(
y[(i) * n : (i + 1) * n], r[(i) * n : (i + 1) * n], 0.04
)
ref_loss = np.append(ref_loss, r_loss)
ref_dy = np.append(ref_dy, r_dy)
ref_scale_loss = np.append(ref_scale_loss, r_scale_loss)
ref_scale_dy = np.append(ref_scale_dy, r_scale_dy)
dloss = np.random.random(m).astype(np.float32)
workspace.blobs["pred"] = y
workspace.blobs["label"] = r
workspace.blobs["session_lengths"] = session_lengths
workspace.blobs["dloss"] = dloss
# Test scale = 1
op = core.CreateOperator(
"SessionMarginLoss",
["pred", "label", "session_lengths"],
["loss", "dpred"],
margin=0.06,
)
workspace.RunOperatorOnce(op)
loss = workspace.blobs["loss"]
dy = workspace.blobs["dpred"]
np.testing.assert_allclose(loss, ref_loss, rtol=1e-5, atol=1e-6)
np.testing.assert_allclose(dy, ref_dy, rtol=1e-5, atol=1e-6)
name = op.output[0]
arr = workspace.FetchBlob(name)
self.assertGradientChecks(
gc, op, [y, r, session_lengths], 0, [0], stepsize=1e-3, threshold=2e-1
)
# Test scale > 1
op = core.CreateOperator(
"SessionMarginLoss",
["pred", "label", "session_lengths"],
["loss", "dpred"],
margin=0.04,
)
workspace.RunOperatorOnce(op)
loss = workspace.blobs["loss"]
dy = workspace.blobs["dpred"]
np.testing.assert_allclose(loss, ref_scale_loss, rtol=1e-5, atol=1e-6)
np.testing.assert_allclose(dy, ref_scale_dy, rtol=1e-5, atol=1e-6)
self.assertGradientChecks(
gc, op, [y, r, session_lengths], 0, [0], stepsize=1e-3, threshold=2e-1
)
|
pytorch-master
|
caffe2/python/operator_test/margin_loss_l2r_operator_test.py
|
from caffe2.python import core, workspace
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
class TestSoftmaxOps(serial.SerializedTestCase):
@serial.given(n=st.sampled_from([0, 2, 4, 71, 103]),
D=st.sampled_from([0, 4, 8, 64, 79, 256, 333]),
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
def test_softmax(self, n, D, engine, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
        # Reference implementation of softmax
def label_softmax(X):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
if D == 0:
return [probs]
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
return [probs]
op = core.CreateOperator(
"Softmax",
["X"],
["probs"],
engine=engine
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=label_softmax,
)
@given(n=st.sampled_from([0, 2, 4, 71, 103, 555, 751, 1201]),
D=st.sampled_from([0, 4, 8, 64, 79, 256, 333, 1000]),
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
@settings(deadline=10000)
def test_softmax_grad(self, n, D, engine, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
Y = np.random.rand(n, D).astype(np.float32)
dY = np.random.rand(n, D).astype(np.float32)
Y = Y + 1e-2
        # Reference implementation of the softmax gradient
def label_softmax_grad(X, dY):
dX = Y * 0.0
for i in range(n):
d = np.dot(Y[i, :], dY[i, :])
dX[i, :] = Y[i, :] * (dY[i, :] - d)
return [dX]
op = core.CreateOperator(
"SoftmaxGradient",
["Y", "dY"],
["dX"],
engine=engine
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[Y, dY],
reference=label_softmax_grad,
)
@given(axis=st.integers(min_value=1, max_value=4),
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
def test_softmax_axis(self, axis, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
X = X + 1e-2
def prod(xs):
p = 1
for x in xs:
p *= x
return p
N = prod(list(X.shape)[:axis])
D = prod(list(X.shape)[axis:])
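        # Softmax with an `axis` argument treats the input as a 2D [N, D]
        # matrix: dimensions before `axis` are flattened into N and the rest
        # into D, as computed above.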
        # Reference implementation of softmax
def label_softmax(X):
X_ = X.reshape(N, D)
probs = np.zeros((N, D))
rowmax = np.zeros(N)
for i in range(N):
rowmax[i] = max(X_[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X_[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
return [probs.reshape(*X.shape)]
op = core.CreateOperator(
"Softmax",
["X"],
["probs"],
axis=axis,
engine=engine,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=label_softmax,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(2, 10), D=st.integers(4, 16),
only_loss=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_softmax_with_loss(self, n, D, gc, only_loss, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = (np.random.rand(n) * D).astype(np.int32)
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = [-np.log(max(probs[i][label[i]], 1e-20))
for i in range(n)]
avgloss = np.sum(label_xent) / float(n)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
only_loss=only_loss,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=label_softmax_crossent,
)
self.assertGradientChecks(
gc, op, [X, label], 0, [1], stepsize=1e-4, threshold=1e-2)
@given(
n=st.integers(2, 5),
D=st.integers(4, 16),
only_loss=st.booleans(),
label_prob=st.booleans(),
**hu.gcs
)
@settings(deadline=10000)
def test_softmax_with_loss_axis_2(
self, n, D, only_loss, label_prob,
gc, dc
):
np.random.seed(2603)
X = np.random.rand(n, n, D).astype(np.float32)
X = X + 1e-2
if label_prob:
label = np.random.rand(n, n, D).astype(np.float32)
label /= label.sum(axis=2, keepdims=True)
else:
label = (np.random.rand(n, n) * D).astype(np.int32)
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, n, D))
rowmax = np.zeros((n, n))
for i in range(n):
for j in range(n):
rowmax[i, j] = max(X[i, j, ])
# We need to subtract the max to avoid numerical issues
probs[i, j] = X[i, j] - rowmax[i, j]
exps = np.exp(probs[i, j, ])
norm = sum(exps)
probs[i, j, ] = exps / norm
label_xent = 0
for i in range(n):
for j in range(n):
if label_prob:
for k in range(D):
label_xent += (
-np.log(max(probs[i, j, k], 1e-20)) *
label[i, j, k]
)
else:
label_xent += -np.log(max(probs[i, j, label[i, j]], 1e-20))
avgloss = label_xent / float(n * n)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
only_loss=only_loss,
label_prob=label_prob,
axis=2,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=label_softmax_crossent,
)
self.assertGradientChecks(
gc, op, [X, label], 0, [1], stepsize=1e-4, threshold=1e-2)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(**hu.gcs_gpu_only)
def test_softmax_with_loss_large(self, gc, dc):
np.random.seed(2603)
for n in [32]:
for D in [1000, 2000, 20000]:
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = (np.random.rand(n) * D).astype(np.int32)
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = [-np.log(max(probs[i][label[i]], 1e-20))
for i in range(n)]
avgloss = np.sum(label_xent) / float(n)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=label_softmax_crossent,
)
@given(n=st.integers(2, 10), D=st.integers(4, 16), **hu.gcs)
@settings(deadline=None)
def test_softmax_with_loss_label_prob(self, n, D, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = np.random.rand(D, n).astype(np.float32)
# normalize labels to sum to 1
label /= np.sum(label, axis=0)
label = label.transpose()
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = np.zeros(X.shape)
for i in range(n):
for j in range(D):
label_xent[i][j] = -np.log(
max(probs[i, j], 1e-20)) * label[i, j]
avgloss = np.sum(label_xent) / float(n)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
label_prob=1
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=label_softmax_crossent,
)
self.assertGradientChecks(
gc, op, [X, label], 0, [1], stepsize=1e-4, threshold=1e-2)
@given(
n=st.integers(2, 10),
D=st.integers(4, 16),
only_loss=st.booleans(),
**hu.gcs)
@settings(deadline=None)
def test_softmax_with_loss_weighted(self, n, D, only_loss, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = (np.random.rand(n) * D).astype(np.int32)
# Init weights (weight by sample)
weights = np.random.rand(n).astype(np.float32)
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent_weighted(X, label, weights):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = [-weights[i] * np.log(max(probs[i][label[i]], 1e-20))
for i in range(n)]
avgloss = np.sum(label_xent) / sum(weights)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label", "weights"],
["probs", "avgloss"],
only_loss=only_loss,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label, weights],
reference=label_softmax_crossent_weighted,
)
self.assertGradientChecks(
gc, op, [X, label, weights], 0, [1], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(2, 10), D=st.integers(4, 16), **hu.gcs)
@settings(deadline=None)
def test_softmax_with_loss_label_prob_weighted(self, n, D, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = np.random.rand(D, n).astype(np.float32)
# normalize labels to sum to 1
label /= np.sum(label, axis=0)
label = label.transpose()
# Init weights (weight by sample)
weights = np.random.rand(n).astype(np.float32)
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent_weighted(X, label, weights):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = np.zeros(X.shape)
for i in range(n):
for j in range(D):
label_xent[i][j] = -np.log(
max(probs[i, j], 1e-20)) * label[i, j] * weights[i]
avgloss = np.sum(label_xent) / sum(weights)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label", "weights"],
["probs", "avgloss"],
label_prob=1,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label, weights],
reference=label_softmax_crossent_weighted,
)
self.assertGradientChecks(
gc, op, [X, label, weights], 0, [1], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(2, 5), D=st.integers(2, 4),
weighted=st.booleans(), **hu.gcs)
@settings(deadline=None, max_examples=50)
def test_spatial_softmax_with_loss(self, n, D, weighted, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
W = 18
H = 12
np.random.seed(2603)
X = np.random.rand(n, D, H, W).astype(np.float32)
X = X + 1e-2
weighted = True
weights = None
if weighted:
weights = np.random.rand(n, H, W).astype(np.float32)
# Initialize label. Some of the labels are (-1), i.e "DONT CARE"
label = (np.random.rand(n, H, W) * (D + 1)).astype(np.int32) - 1
def label_softmax_crossent_spatial(X, label, weights=None):
probs = np.zeros((n, D, H, W))
rowmax = np.zeros((n, H, W))
label_xent = np.zeros((n, H, W))
for i in range(n):
for x in range(W):
for y in range(H):
rowmax[i, y, x] = max(X[i, :, y, x])
# We need to subtract the max to avoid numerical issues
probs[i, :, y, x] = X[i, :, y, x] - rowmax[i, y, x]
exps = np.exp(probs[i, :, y, x])
probs[i, :, y, x] = exps / sum(exps)
label_xent[:, y, x] = \
[-np.log(max(probs[j, label[i, y, x], y, x], 1e-20))
for j in range(n)]
total_xent = 0.0
total_weight = 0.0
for y in range(H):
for x in range(W):
for i in range(n):
l = label[i, y, x]
if (l != (-1)):
w = 1.0 if weights is None else weights[i, y, x]
total_xent += \
-np.log(max(probs[i, l, y, x], 1e-20)) * w
total_weight += w
print("Total weight {}".format(total_weight))
return (probs, total_xent / total_weight)
op = core.CreateOperator(
"SpatialSoftmaxWithLoss",
["X", "label"] + ([] if weights is None else ["weights"]),
["probs", "avgloss"],
)
inputs = [X, label] + ([] if weights is None else [weights])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=label_softmax_crossent_spatial,
)
self.assertGradientChecks(
gc, op, inputs, 0, [1], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(4, 5), D=st.integers(3, 4),
weighted=st.booleans(), **hu.gcs)
def test_spatial_softmax_with_loss_allignore(self, n, D, weighted, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
W = 18
H = 12
np.random.seed(2603)
X = np.random.rand(n, D, H, W).astype(np.float32)
X = X + 1e-2
weighted = True
weights = None
if weighted:
weights = np.random.rand(n, H, W).astype(np.float32)
# Initialize label. All labels as "DONT CARE"
label = np.zeros((n, H, W)).astype(np.int32) - 1
print(label)
def label_softmax_crossent_spatial(X, label, weights=None):
probs = np.zeros((n, D, H, W))
rowmax = np.zeros((n, H, W))
label_xent = np.zeros((n, H, W))
for i in range(n):
for x in range(W):
for y in range(H):
rowmax[i, y, x] = max(X[i, :, y, x])
# We need to subtract the max to avoid numerical issues
probs[i, :, y, x] = X[i, :, y, x] - rowmax[i, y, x]
exps = np.exp(probs[i, :, y, x])
probs[i, :, y, x] = exps / sum(exps)
label_xent[:, y, x] = \
[-np.log(max(probs[j, label[i, y, x], y, x], 1e-20))
for j in range(n)]
return (probs, 0.0)
op = core.CreateOperator(
"SpatialSoftmaxWithLoss",
["X", "label"] + ([] if weights is None else ["weights"]),
["probs", "avgloss"],
)
inputs = [X, label] + ([] if weights is None else [weights])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=label_softmax_crossent_spatial,
)
@given(n=st.integers(4, 5), D=st.integers(3, 4),
weighted=st.booleans(), **hu.gcs)
def test_softmax_with_loss_zero_weight(self, n, D, weighted, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
weights = np.zeros(n).astype(np.float32)
# Initialize label
label = (np.random.rand(n) * D).astype(np.int32)
def label_softmax_crossent(X, label, weights=None):
probs = np.zeros((n, D))
rowmax = np.zeros((n))
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
return (probs, 0.0)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label", "weights"],
["probs", "avgloss"]
)
inputs = [X, label] + ([] if weights is None else [weights])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=label_softmax_crossent,
)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
def test_compare_cpugpu(self):
        '''
        Additional test that checks that CPU and GPU return the same values
        with larger examples. This is mainly to verify that the more complex
        GPU implementation is correct.
        '''
from caffe2.proto import caffe2_pb2
for _j in range(3):
gpuop = core.CreateOperator(
"SpatialSoftmaxWithLoss",
["X_gpu", "label_gpu"],
["probs_gpu", "avgloss_gpu"],
device_option=core.DeviceOption(workspace.GpuDeviceType, 0)
)
cpuop = core.CreateOperator(
"SpatialSoftmaxWithLoss",
["X_cpu", "label_cpu"],
["probs_cpu", "avgloss_cpu"],
device_option=core.DeviceOption(caffe2_pb2.CPU)
)
n = 8
D = 4
W = 64 + int(np.random.rand(1) * 1024)
H = 64 + int(np.random.rand(1) * 1024)
print("W: {} H: {}".format(W, H))
X = np.random.rand(n, D, H, W).astype(np.float32)
X = X + 1e-2
# Initialize label. Some of the labels are (-1), i.e "DONT CARE"
label = (np.random.rand(n, H, W) * (D + 1)).astype(np.int32) - 1
gpu0 = core.DeviceOption(workspace.GpuDeviceType, 0)
workspace.FeedBlob("X_cpu", X)
workspace.FeedBlob("label_cpu", label)
workspace.FeedBlob("X_gpu", X, device_option=gpu0)
workspace.FeedBlob("label_gpu", label, device_option=gpu0)
workspace.RunOperatorOnce(gpuop)
workspace.RunOperatorOnce(cpuop)
probs_gpu = workspace.FetchBlob("probs_gpu")
probs_cpu = workspace.FetchBlob("probs_cpu")
loss_gpu = workspace.FetchBlob("avgloss_gpu")
loss_cpu = workspace.FetchBlob("avgloss_cpu")
np.testing.assert_allclose(probs_gpu, probs_cpu, rtol=1e-4)
np.testing.assert_allclose(loss_gpu, loss_cpu, rtol=1e-1)
if __name__ == "__main__":
import unittest
import random
random.seed(2603)
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/softmax_ops_test.py
|
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import copy
class RoIAlignRotatedOp(hu.HypothesisTestCase):
def bbox_xywh_to_xyxy(self, boxes):
"""
Convert from [center_x center_y w h] format to [x1 y1 x2 y2].
"""
w, h = boxes[:, 2], boxes[:, 3]
boxes[:, 0] -= w / 2.0 # x1 = center_x - width/2
boxes[:, 1] -= h / 2.0 # y1 = center_y - height/2
boxes[:, 2] = boxes[:, 0] + w # x2 = x1 + width
boxes[:, 3] = boxes[:, 1] + h # y2 = y1 + height
return boxes
@given(
H=st.integers(min_value=50, max_value=100),
W=st.integers(min_value=50, max_value=100),
C=st.integers(min_value=1, max_value=3),
num_rois=st.integers(min_value=0, max_value=10),
pooled_size=st.sampled_from([7, 14]),
**hu.gcs
)
def test_horizontal_rois(self, H, W, C, num_rois, pooled_size, gc, dc):
"""
Test that results match with RoIAlign when angle=0.
"""
X = np.random.randn(1, C, H, W).astype(np.float32)
R = np.zeros((num_rois, 6)).astype(np.float32)
angle = 0.0
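        # Each RoI is [batch_idx, ctr_x, ctr_y, w, h, angle]; with angle=0 the
        # boxes are axis-aligned, so results should match plain RoIAlign.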
for i in range(num_rois):
x = np.random.uniform(1, W - 1)
y = np.random.uniform(1, H - 1)
w = np.random.uniform(1, min(x, W - x))
h = np.random.uniform(1, min(y, H - y))
R[i] = [0, x, y, w, h, angle]
op = core.CreateOperator(
"RoIAlignRotated",
["X", "R"],
["Y"],
pooled_h=pooled_size,
pooled_w=pooled_size,
sampling_ratio=0,
)
def roialign_ref(X, R):
# Remove angle and convert from [center_x center_y w h]
# to [x1 y1 x2 y2] format.
R_ref = copy.deepcopy(R[:, 0:5])
R_ref[:, 1:5] = self.bbox_xywh_to_xyxy(R_ref[:, 1:5])
ref_op = core.CreateOperator(
"RoIAlign",
["X_ref", "R_ref"],
["Y_ref"],
pooled_h=pooled_size,
pooled_w=pooled_size,
sampling_ratio=0,
)
workspace.FeedBlob("X_ref", X)
workspace.FeedBlob("R_ref", R_ref)
workspace.RunOperatorOnce(ref_op)
return [workspace.FetchBlob("Y_ref")]
self.assertReferenceChecks(
device_option=gc, op=op, inputs=[X, R], reference=roialign_ref
)
if core.IsGPUDeviceType(gc.device_type):
self.assertGradientChecks(gc, op, [X, R], 0, [0])
@given(
H=st.integers(min_value=50, max_value=100),
W=st.integers(min_value=50, max_value=100),
C=st.integers(min_value=1, max_value=3),
num_rois=st.integers(min_value=0, max_value=10),
pooled_size=st.sampled_from([7, 14]),
angle=st.sampled_from([-270, -180, -90, 90, 180, 270]),
**hu.gcs
)
def test_simple_rotations(
self, H, W, C, num_rois, pooled_size, angle, gc, dc
):
"""
Test with right-angled rotations that don't need interpolation.
"""
X = np.random.randn(1, C, H, W).astype(np.float32)
R = np.zeros((num_rois, 6)).astype(np.float32)
for i in range(num_rois):
x = np.random.uniform(1, W - 1)
y = np.random.uniform(1, H - 1)
w = np.random.uniform(1, min(x, W - x, y, H - y))
h = np.random.uniform(1, min(x, W - x, y, H - y))
R[i] = [0, x, y, w, h, angle]
op = core.CreateOperator(
"RoIAlignRotated",
["X", "R"],
["Y"],
pooled_h=pooled_size,
pooled_w=pooled_size,
sampling_ratio=0,
)
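        # Local reimplementation of np.rot90 with the `axes` argument; see the
        # note inside roialign_ref below for why this is needed.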
def roialign_rot90(m, k=1, axes=(0,1)):
axes = tuple(axes)
if len(axes) != 2:
raise ValueError("len(axes) must be 2.")
m = np.asanyarray(m)
if axes[0] == axes[1] or np.absolute(axes[0] - axes[1]) == m.ndim:
raise ValueError("Axes must be different.")
if (axes[0] >= m.ndim or axes[0] < -m.ndim or
axes[1] >= m.ndim or axes[1] < -m.ndim):
raise ValueError(
"Axes={} out of range for array of ndim={}.".format(axes, m.ndim))
k %= 4
if k == 0:
return m[:]
if k == 2:
return roialign_flip(roialign_flip(m, axes[0]), axes[1])
axes_list = np.arange(0, m.ndim)
(axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]],
axes_list[axes[0]])
if k == 1:
return np.transpose(roialign_flip(m,axes[1]), axes_list)
else:
# k == 3
return roialign_flip(np.transpose(m, axes_list), axes[1])
def roialign_flip(m, axis):
if not hasattr(m, 'ndim'):
m = np.asarray(m)
indexer = [slice(None)] * m.ndim
try:
indexer[axis] = slice(None, None, -1)
except IndexError:
raise ValueError("axis=%i is invalid for the %i-dimensional input array"
% (axis, m.ndim))
return m[tuple(indexer)]
def roialign_ref(X, R):
# `angle` denotes counter-clockwise rotation. Rotate the input
# feature map in the opposite (clockwise) direction and perform
# standard RoIAlign. We assume all RoIs have the same angle.
#
# Also note that we need to have our own version of np.rot90,
# since axes isn't an argument until 1.12.0 and doesn't exist
# on all tested platforms.
norm_angle = (angle + 360) % 360
X_ref = roialign_rot90(X, k=-norm_angle / 90, axes=(2, 3))
# Rotate RoIs clockwise wrt the center of the input feature
# map to make them horizontal and convert from
# [center_x center_y w h] to [x1 y1 x2 y2] format.
roi_x, roi_y = R[:, 1], R[:, 2]
if norm_angle == 90:
new_roi_x = H - roi_y - 1
new_roi_y = roi_x
elif norm_angle == 180:
new_roi_x = W - roi_x - 1
new_roi_y = H - roi_y - 1
elif norm_angle == 270:
new_roi_x = roi_y
new_roi_y = W - roi_x - 1
else:
raise NotImplementedError
R_ref = copy.deepcopy(R[:, 0:5])
R_ref[:, 1], R_ref[:, 2] = new_roi_x, new_roi_y
R_ref[:, 1:5] = self.bbox_xywh_to_xyxy(R_ref[:, 1:5])
ref_op = core.CreateOperator(
"RoIAlign",
["X_ref", "R_ref"],
["Y_ref"],
pooled_h=pooled_size,
pooled_w=pooled_size,
sampling_ratio=0,
)
workspace.FeedBlob("X_ref", X_ref)
workspace.FeedBlob("R_ref", R_ref)
workspace.RunOperatorOnce(ref_op)
return [workspace.FetchBlob("Y_ref")]
self.assertReferenceChecks(
device_option=gc, op=op, inputs=[X, R], reference=roialign_ref
)
if core.IsGPUDeviceType(gc.device_type):
self.assertGradientChecks(gc, op, [X, R], 0, [0])
if __name__ == '__main__':
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/roi_align_rotated_op_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
@st.composite
def _glu_old_input(draw):
dims = draw(st.lists(st.integers(min_value=1, max_value=5), min_size=1, max_size=3))
axis = draw(st.integers(min_value=0, max_value=len(dims)))
# The axis dimension must be divisible by two
axis_dim = 2 * draw(st.integers(min_value=1, max_value=2))
dims.insert(axis, axis_dim)
X = draw(hu.arrays(dims, np.float32, None))
return (X, axis)
class TestGlu(serial.SerializedTestCase):
@given(
X_axis=_glu_old_input(),
**hu.gcs
)
@settings(deadline=10000)
def test_glu_old(self, X_axis, gc, dc):
X, axis = X_axis
def glu_ref(X):
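            # GLU: split X along `axis` into halves (x1, x2) and gate them as
            # Y = x1 * sigmoid(x2).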
x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
Y = x1 * (1. / (1. + np.exp(-x2)))
return [Y]
op = core.CreateOperator("Glu", ["X"], ["Y"], dim=axis)
self.assertReferenceChecks(gc, op, [X], glu_ref)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/glu_op_test.py
|
import struct
import unittest
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import torch
from caffe2.python import core, workspace
from hypothesis import given, settings
from scipy.stats import norm
def generate_rois(roi_counts, im_dims):
assert len(roi_counts) == len(im_dims)
all_rois = []
for i, num_rois in enumerate(roi_counts):
if num_rois == 0:
continue
# [batch_idx, x1, y1, x2, y2]
rois = np.random.uniform(0, im_dims[i], size=(roi_counts[i], 5)).astype(
np.float32
)
rois[:, 0] = i # batch_idx
# Swap (x1, x2) if x1 > x2
rois[:, 1], rois[:, 3] = (
np.minimum(rois[:, 1], rois[:, 3]),
np.maximum(rois[:, 1], rois[:, 3]),
)
# Swap (y1, y2) if y1 > y2
rois[:, 2], rois[:, 4] = (
np.minimum(rois[:, 2], rois[:, 4]),
np.maximum(rois[:, 2], rois[:, 4]),
)
all_rois.append(rois)
if len(all_rois) > 0:
return np.vstack(all_rois)
return np.empty((0, 5)).astype(np.float32)
def generate_rois_rotated(roi_counts, im_dims):
rois = generate_rois(roi_counts, im_dims)
# [batch_id, ctr_x, ctr_y, w, h, angle]
rotated_rois = np.empty((rois.shape[0], 6)).astype(np.float32)
rotated_rois[:, 0] = rois[:, 0] # batch_id
rotated_rois[:, 1] = (rois[:, 1] + rois[:, 3]) / 2.0 # ctr_x = (x1 + x2) / 2
rotated_rois[:, 2] = (rois[:, 2] + rois[:, 4]) / 2.0 # ctr_y = (y1 + y2) / 2
rotated_rois[:, 3] = rois[:, 3] - rois[:, 1] + 1.0 # w = x2 - x1 + 1
rotated_rois[:, 4] = rois[:, 4] - rois[:, 2] + 1.0 # h = y2 - y1 + 1
rotated_rois[:, 5] = np.random.uniform(-90.0, 90.0) # angle in degrees
return rotated_rois
def create_bbox_transform_inputs(roi_counts, num_classes, rotated):
batch_size = len(roi_counts)
total_rois = sum(roi_counts)
im_dims = np.random.randint(100, 600, batch_size)
rois = (
generate_rois_rotated(roi_counts, im_dims)
if rotated
else generate_rois(roi_counts, im_dims)
)
box_dim = 5 if rotated else 4
deltas = np.random.randn(total_rois, box_dim * num_classes).astype(np.float32)
im_info = np.zeros((batch_size, 3)).astype(np.float32)
im_info[:, 0] = im_dims
im_info[:, 1] = im_dims
im_info[:, 2] = 1.0
return rois, deltas, im_info
# Eigen/Python round 0.5 away from 0, Numpy rounds to even
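# (Note: under Python 3 the built-in round() also rounds half to even, so this
# matches NumPy's behavior on exact .5 ties.)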
round_to_nearest = np.vectorize(round)
def bytes_to_floats(byte_matrix):
floats = np.empty([np.shape(byte_matrix)[0], 1], dtype=np.float32)
for i, byte_values in enumerate(byte_matrix):
(floats[i],) = struct.unpack("f", bytearray(byte_values))
return floats
def floats_to_bytes(floats):
byte_matrix = np.empty([np.shape(floats)[0], 4], dtype=np.uint8)
for i, value in enumerate(floats):
assert isinstance(value, np.float32), (value, floats)
as_bytes = struct.pack("f", value)
        # In Python 3 indexing bytes yields ints; in Python 2 it yields
        # one-character strings
if isinstance(as_bytes[0], int):
byte_matrix[i] = list(as_bytes)
else:
byte_matrix[i] = [ord(i) for i in as_bytes]
return byte_matrix
def fused_rowwise_8bit_quantize_reference(data):
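    # Fused row layout: uint8 quantized values, followed by 4 bytes of float32
    # scale and 4 bytes of float32 bias.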
minimum = np.min(data, axis=1, keepdims=True)
maximum = np.max(data, axis=1, keepdims=True)
span = maximum - minimum
bias = minimum
scale = span / 255.0
inverse_scale = 255.0 / (span + 1e-8)
quantized_data = round_to_nearest((data - bias) * inverse_scale)
scale_bytes = floats_to_bytes(scale.reshape(-1))
bias_bytes = floats_to_bytes(bias.reshape(-1))
return np.concatenate([quantized_data, scale_bytes, bias_bytes], axis=1)
def fused_rowwise_8bit_quantize_dequantize_reference(data):
fused_quantized = fused_rowwise_8bit_quantize_reference(data)
scale = bytes_to_floats(fused_quantized[:, -8:-4].astype(np.uint8))
bias = bytes_to_floats(fused_quantized[:, -4:].astype(np.uint8))
quantized_data = fused_quantized[:, :-8]
return quantized_data * scale + bias
class TorchIntegration(hu.HypothesisTestCase):
@given(
roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),
num_classes=st.integers(1, 10),
rotated=st.booleans(),
angle_bound_on=st.booleans(),
clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
**hu.gcs_cpu_only
)
def test_bbox_transform(
self,
roi_counts,
num_classes,
rotated,
angle_bound_on,
clip_angle_thresh,
gc,
dc,
):
"""
Test with rois for multiple images in a batch
"""
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, rotated
)
def bbox_transform_ref():
ref_op = core.CreateOperator(
"BBoxTransform",
["rois", "deltas", "im_info"],
["box_out"],
apply_scale=False,
rotated=rotated,
angle_bound_on=angle_bound_on,
clip_angle_thresh=clip_angle_thresh,
)
workspace.FeedBlob("rois", rois)
workspace.FeedBlob("deltas", deltas)
workspace.FeedBlob("im_info", im_info)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("box_out")
box_out = torch.tensor(bbox_transform_ref())
a, b = torch.ops._caffe2.BBoxTransform(
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
[1.0, 1.0, 1.0, 1.0],
False,
rotated,
angle_bound_on,
-90,
90,
clip_angle_thresh,
legacy_plus_one=True,
)
torch.testing.assert_allclose(box_out, a)
@given(
roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10),
num_classes=st.integers(1, 10),
rotated=st.booleans(),
angle_bound_on=st.booleans(),
clip_angle_thresh=st.sampled_from([-1.0, 1.0]),
batch_splits_dtype=st.sampled_from([torch.float32, torch.int32]),
**hu.gcs_cpu_only
)
def test_box_with_nms_limits(
self,
roi_counts,
num_classes,
rotated,
angle_bound_on,
clip_angle_thresh,
batch_splits_dtype,
gc,
dc,
):
rotated = False # FIXME remove this after rotation is supported
rois, deltas, im_info = create_bbox_transform_inputs(
roi_counts, num_classes, rotated
)
pred_bbox, batch_splits = [
t.detach().numpy()
for t in torch.ops._caffe2.BBoxTransform(
torch.tensor(rois),
torch.tensor(deltas),
torch.tensor(im_info),
[1.0, 1.0, 1.0, 1.0],
False,
rotated,
angle_bound_on,
-90,
90,
clip_angle_thresh,
legacy_plus_one=True,
)
]
class_prob = np.random.randn(sum(roi_counts), num_classes).astype(np.float32)
score_thresh = 0.5
nms_thresh = 0.5
topk_per_image = sum(roi_counts) / 2
def box_with_nms_limit_ref():
input_blobs = ["class_prob", "pred_bbox", "batch_splits"]
output_blobs = [
"score_nms",
"bbox_nms",
"class_nms",
"batch_splits_nms",
"keeps_nms",
"keeps_size_nms",
]
ref_op = core.CreateOperator(
"BoxWithNMSLimit",
input_blobs,
output_blobs,
score_thresh=float(score_thresh),
nms=float(nms_thresh),
detections_per_im=int(topk_per_image),
soft_nms_enabled=False,
soft_nms_method="linear",
soft_nms_sigma=0.5,
soft_nms_min_score_thres=0.001,
rotated=rotated,
)
workspace.FeedBlob("class_prob", class_prob)
workspace.FeedBlob("pred_bbox", pred_bbox)
workspace.FeedBlob("batch_splits", batch_splits)
workspace.RunOperatorOnce(ref_op)
return (workspace.FetchBlob(b) for b in output_blobs)
output_refs = box_with_nms_limit_ref()
outputs = torch.ops._caffe2.BoxWithNMSLimit(
torch.tensor(class_prob),
torch.tensor(pred_bbox),
torch.tensor(batch_splits, dtype=batch_splits_dtype),
score_thresh=float(score_thresh),
nms=float(nms_thresh),
detections_per_im=int(topk_per_image),
soft_nms_enabled=False,
soft_nms_method="linear",
soft_nms_sigma=0.5,
soft_nms_min_score_thres=0.001,
rotated=rotated,
cls_agnostic_bbox_reg=False,
input_boxes_include_bg_cls=True,
output_classes_include_bg_cls=True,
legacy_plus_one=True,
)
for o, o_ref in zip(outputs, output_refs):
torch.testing.assert_allclose(o, o_ref)
@given(
dim_1=st.integers(min_value=10, max_value=10),
dim_2=st.integers(min_value=3, max_value=3),
dim_3=st.integers(min_value=2, max_value=2),
)
def test_sparse_to_dense_mask(self, dim_1, dim_2, dim_3):
indices = np.array([i + 1 for i in range(dim_1)]).astype(np.int32)
values = np.random.rand(dim_1, dim_2, dim_3).astype(np.float32)
default_value = np.zeros((dim_2, dim_3)).astype(np.float32)
mask = [2, 4, 9]
def sparse_to_dense_mask_ref(return_presence_mask=False):
ref_op = core.CreateOperator(
"SparseToDenseMask",
["indices", "values", "default_value"],
["output", "presence_mask"],
mask=mask,
return_presence_mask=return_presence_mask,
)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("values", values)
workspace.FeedBlob("default_value", default_value)
workspace.RunOperatorOnce(ref_op)
if return_presence_mask:
return (
workspace.FetchBlob("output"),
workspace.FetchBlob("presence_mask"),
)
return workspace.FetchBlob("output")
# Testing return_presence_mask = False
output = sparse_to_dense_mask_ref()
output = torch.tensor(output)
a, _ = torch.ops._caffe2.SparseToDenseMask(
torch.tensor(indices),
torch.tensor(values),
torch.tensor(default_value),
None,
mask=mask,
)
torch.testing.assert_allclose(output, a)
# Testing return_presence_mask = True
output, presence_mask = sparse_to_dense_mask_ref(return_presence_mask=True)
output = torch.tensor(output)
presence_mask = torch.tensor(presence_mask)
a, b = torch.ops._caffe2.SparseToDenseMask(
torch.tensor(indices),
torch.tensor(values),
torch.tensor(default_value),
None,
mask=mask,
return_presence_mask=True,
)
torch.testing.assert_allclose(output, a)
torch.testing.assert_allclose(presence_mask, b)
@given(
A=st.integers(min_value=4, max_value=4),
H=st.integers(min_value=10, max_value=10),
W=st.integers(min_value=8, max_value=8),
img_count=st.integers(min_value=3, max_value=3),
)
def test_generate_proposals(self, A, H, W, img_count):
scores = np.ones((img_count, A, H, W)).astype(np.float32)
bbox_deltas = (
np.linspace(0, 10, num=img_count * 4 * A * H * W)
.reshape((img_count, 4 * A, H, W))
.astype(np.float32)
)
im_info = np.ones((img_count, 3)).astype(np.float32) / 10
anchors = np.ones((A, 4)).astype(np.float32)
def generate_proposals_ref():
ref_op = core.CreateOperator(
"GenerateProposals",
["scores", "bbox_deltas", "im_info", "anchors"],
["rois", "rois_probs"],
spatial_scale=2.0,
)
workspace.FeedBlob("scores", scores)
workspace.FeedBlob("bbox_deltas", bbox_deltas)
workspace.FeedBlob("im_info", im_info)
workspace.FeedBlob("anchors", anchors)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("rois"), workspace.FetchBlob("rois_probs")
rois, rois_probs = generate_proposals_ref()
rois = torch.tensor(rois)
rois_probs = torch.tensor(rois_probs)
a, b = torch.ops._caffe2.GenerateProposals(
torch.tensor(scores),
torch.tensor(bbox_deltas),
torch.tensor(im_info),
torch.tensor(anchors),
2.0,
6000,
300,
0.7,
16,
True,
-90,
90,
1.0,
legacy_plus_one=True,
)
torch.testing.assert_allclose(rois, a)
torch.testing.assert_allclose(rois_probs, b)
@given(
bsz=st.integers(1, 5),
seq_lens=st.integers(1, 6),
emb_lens=st.integers(5, 10),
hidden_size=st.integers(3, 7),
num_layers=st.integers(1, 4),
has_biases=st.booleans(),
is_bidirectional=st.booleans(),
batch_first=st.booleans(),
)
def test_inference_lstm(
self,
bsz,
seq_lens,
emb_lens,
hidden_size,
num_layers,
has_biases,
is_bidirectional,
batch_first,
):
num_directions = 2 if is_bidirectional else 1
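        # Zero initial hidden/cell states of shape
        # (num_layers * num_directions, batch, hidden_size).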
hx = np.zeros((num_layers * num_directions, bsz, hidden_size), dtype=np.float32)
if batch_first:
inputs = np.random.randn(bsz, seq_lens, emb_lens).astype(np.float32)
else:
inputs = np.random.randn(seq_lens, bsz, emb_lens).astype(np.float32)
torch_lstm = torch.nn.LSTM(
emb_lens,
hidden_size,
batch_first=batch_first,
bidirectional=is_bidirectional,
bias=has_biases,
num_layers=num_layers,
)
def inference_lstm_ref():
input_names = ["inputs", "hidden_0", "hidden_1"]
workspace.FeedBlob("inputs", inputs)
workspace.FeedBlob("hidden_0", hx)
workspace.FeedBlob("hidden_1", hx)
for i, param in enumerate(torch_lstm._flat_weights):
input_names.append("param_{}".format(i))
workspace.FeedBlob("param_{}".format(i), param.detach().numpy())
ref_op = core.CreateOperator(
"InferenceLSTM",
input_names,
["output", "hidden", "cell"],
num_layers=num_layers,
has_biases=has_biases,
batch_first=batch_first,
bidirectional=is_bidirectional,
)
workspace.RunOperatorOnce(ref_op)
return (
workspace.FetchBlob("output"),
workspace.FetchBlob("hidden"),
workspace.FetchBlob("cell"),
)
output, hidden, cell = inference_lstm_ref()
output = torch.tensor(output)
hidden = torch.tensor(hidden)
cell = torch.tensor(cell)
lstm_in = [
torch.from_numpy(inputs),
torch.from_numpy(hx),
torch.from_numpy(hx),
] + [param.detach() for param in torch_lstm._flat_weights]
a, b, c = torch.ops._caffe2.InferenceLSTM(
lstm_in, num_layers, has_biases, batch_first, is_bidirectional
)
torch.testing.assert_allclose(output, a)
torch.testing.assert_allclose(hidden, b)
torch.testing.assert_allclose(cell, c)
    # This test uses workspace.has_cuda_support rather than workspace.has_gpu_support
    # to exclude it from HIP, because tensor interop doesn't work for HIP tensors yet
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
@given(
A=st.integers(min_value=4, max_value=4),
H=st.integers(min_value=10, max_value=10),
W=st.integers(min_value=8, max_value=8),
img_count=st.integers(min_value=3, max_value=3),
)
def test_generate_proposals_cuda(self, A, H, W, img_count):
scores = np.ones((img_count, A, H, W)).astype(np.float32)
bbox_deltas = (
np.linspace(0, 10, num=img_count * 4 * A * H * W)
.reshape((img_count, 4 * A, H, W))
.astype(np.float32)
)
im_info = np.ones((img_count, 3)).astype(np.float32) / 10
anchors = np.ones((A, 4)).astype(np.float32)
def generate_proposals_ref():
ref_op = core.CreateOperator(
"GenerateProposals",
["scores", "bbox_deltas", "im_info", "anchors"],
["rois", "rois_probs"],
spatial_scale=2.0,
)
workspace.FeedBlob("scores", scores)
workspace.FeedBlob("bbox_deltas", bbox_deltas)
workspace.FeedBlob("im_info", im_info)
workspace.FeedBlob("anchors", anchors)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("rois"), workspace.FetchBlob("rois_probs")
rois, rois_probs = generate_proposals_ref()
rois = torch.tensor(rois)
rois_probs = torch.tensor(rois_probs)
a, b = torch.ops._caffe2.GenerateProposals(
torch.tensor(scores).cuda(),
torch.tensor(bbox_deltas).cuda(),
torch.tensor(im_info).cuda(),
torch.tensor(anchors).cuda(),
2.0,
6000,
300,
0.7,
16,
True,
-90,
90,
1.0,
legacy_plus_one=True,
)
torch.testing.assert_allclose(rois, a.cpu())
torch.testing.assert_allclose(rois_probs, b.cpu())
@given(
N=st.integers(min_value=1, max_value=2),
C=st.integers(min_value=4, max_value=4),
H=st.integers(min_value=10, max_value=10),
W=st.integers(min_value=8, max_value=8),
)
def _test_roi_align(self, N, C, H, W, device):
def rand_roi():
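            # [batch_idx, x1, y1, x2, y2]; by construction x1 <= x2 and y1 <= y2.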
return np.array(
[
float(int(N * np.random.rand())),
0.5 * np.random.rand() * W,
0.5 * np.random.rand() * H,
(0.5 + 0.5 * np.random.rand()) * W,
(0.5 + 0.5 * np.random.rand()) * H,
]
).astype(np.float32)
feature = np.random.randn(N, C, H, W).astype(np.float32)
rois = np.array([rand_roi() for _ in range(10)])
def roi_align_ref(_feature, _rois):
ref_op = core.CreateOperator(
"RoIAlign",
["feature", "rois"],
["roi_feature"],
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
)
workspace.FeedBlob("feature", _feature)
workspace.FeedBlob("rois", _rois)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("roi_feature")
roi_feature_ref = roi_align_ref(feature, rois)
roi_feature = torch.ops._caffe2.RoIAlign(
torch.tensor(feature).to(device),
torch.tensor(rois).to(device),
order="NCHW",
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
aligned=False,
)
torch.testing.assert_allclose(roi_feature_ref, roi_feature.cpu())
def test_roi_align_cpu(self):
self._test_roi_align(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_roi_align_cuda(self):
self._test_roi_align(device="cuda")
@given(
N=st.integers(min_value=1, max_value=2),
C=st.integers(min_value=4, max_value=4),
H=st.integers(min_value=10, max_value=10),
W=st.integers(min_value=8, max_value=8),
)
def _test_roi_align_rotated(self, N, C, H, W, device):
def rand_rotated_roi():
return np.array(
[
float(int(N * np.random.rand())),
np.random.rand() * W,
np.random.rand() * H,
np.random.rand() * W,
np.random.rand() * H,
np.random.rand() * 360 - 180,
]
).astype(np.float32)
feature = np.random.randn(N, C, H, W).astype(np.float32)
rois = np.array([rand_rotated_roi() for _ in range(10)])
def roi_align_ref(_feature, _rois):
ref_op = core.CreateOperator(
"RoIAlignRotated",
["feature", "rois"],
["roi_feature"],
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
)
workspace.FeedBlob("feature", _feature)
workspace.FeedBlob("rois", _rois)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("roi_feature")
roi_feature_ref = roi_align_ref(feature, rois)
roi_feature = torch.ops._caffe2.RoIAlignRotated(
torch.tensor(feature).to(device),
torch.tensor(rois).to(device),
order="NCHW",
spatial_scale=1.0,
pooled_h=3,
pooled_w=3,
sampling_ratio=0,
aligned=False,
)
torch.testing.assert_allclose(roi_feature_ref, roi_feature.cpu())
def test_roi_align_rotated_cpu(self):
self._test_roi_align_rotated(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_roi_align_rotated_cuda(self):
self._test_roi_align_rotated(device="cuda")
@given(roi_counts=st.lists(st.integers(0, 5), min_size=1, max_size=10))
def test_collect_and_distribute_fpn_rpn_proposals_op(self, roi_counts):
batch_size = len(roi_counts)
im_dims = np.random.randint(100, 600, batch_size)
rpn_rois_and_scores = []
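        # Five FPN levels (rpn_min_level=2 .. rpn_max_level=6): RoI tensors
        # first, then the matching score tensors.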
for i in range(5):
rpn_rois_and_scores.append(torch.tensor(generate_rois(roi_counts, im_dims)))
for i in range(5):
rpn_rois_and_scores.append(torch.rand(sum(roi_counts)))
rois = torch.ops._caffe2.CollectRpnProposals(
rpn_rois_and_scores,
rpn_max_level=6,
rpn_min_level=2,
rpn_post_nms_topN=sum(roi_counts),
)
fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(
rois,
roi_canonical_scale=224,
roi_canonical_level=4,
roi_max_level=5,
roi_min_level=2,
legacy_plus_one=True,
)
all_outputs = torch.ops._caffe2.CollectAndDistributeFpnRpnProposals(
rpn_rois_and_scores,
roi_canonical_scale=224,
roi_canonical_level=4,
roi_max_level=5,
roi_min_level=2,
rpn_max_level=6,
rpn_min_level=2,
rpn_post_nms_topN=sum(roi_counts),
legacy_plus_one=True,
)
rois_fpn_list = fpn_outputs[:-1]
rois_idx_restore_int32 = fpn_outputs[-1]
# [rois] + fpn_outputs should be equal to all_outputs
torch.testing.assert_allclose(rois, all_outputs[0])
for x, y in zip(fpn_outputs, all_outputs[1:]):
torch.testing.assert_allclose(x, y)
@given(X=hu.tensor(), fast_gelu=st.booleans())
def _test_gelu_op(self, X, fast_gelu, device):
def _gelu_ref(_X):
return (_X * norm.cdf(_X).astype(np.float32),)
(expected_output,) = _gelu_ref(X)
actual_output = torch.ops._caffe2.Gelu(torch.tensor(X), fast_gelu)
rtol = 1e-3 if fast_gelu else 1e-4
atol = 1e-5
torch.testing.assert_allclose(
expected_output, actual_output.cpu(), rtol=rtol, atol=atol
)
def test_gelu_op(self):
self._test_gelu_op(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_gelu_op_cuda(self):
self._test_gelu_op(device="cuda")
@given(
inputs=hu.lengths_tensor(
dtype=np.float32, min_value=1, max_value=5, allow_empty=True
)
)
def _test_lengths_op(self, inputs, ref_op_name, torch_op, device):
data, lengths = inputs
def _lengths_ref(X, Y):
ref_op = core.CreateOperator(ref_op_name, ["X", "Y"], "out")
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("out")
expected_output = _lengths_ref(data, lengths)
actual_output = torch_op(
torch.tensor(data), torch.tensor(lengths, dtype=torch.int32)
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def _test_lengths_sum_op(self, device):
self._test_lengths_op("LengthsSum", torch.ops._caffe2.LengthsSum, device)
def test_lengths_sum_op(self):
self._test_lengths_sum_op(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_lengths_sum_op_cuda(self):
self._test_lengths_sum_op(device="cuda")
def _test_lengths_mean_op(self, device):
self._test_lengths_op("LengthsMean", torch.ops._caffe2.LengthsMean, device)
def test_lengths_mean_op(self):
self._test_lengths_mean_op(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_lengths_mean_op_cuda(self):
self._test_lengths_mean_op(device="cuda")
def _test_lengths_max_op(self, device):
self._test_lengths_op("LengthsMax", torch.ops._caffe2.LengthsMax, device)
def test_lengths_max_op(self):
self._test_lengths_max_op(device="cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_lengths_max_op_cuda(self):
self._test_lengths_max_op(device="cuda")
def _test_resize_nearest_op(self, device):
data = np.random.rand(1, 2, 3, 4).astype(np.float32)
def _resize_nearest_ref(X):
ref_op = core.CreateOperator(
"ResizeNearest",
["X"],
["Y"],
width_scale=2.0,
height_scale=1.5,
order="NCHW",
)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _resize_nearest_ref(data)
actual_output = torch.ops._caffe2.ResizeNearest(
torch.tensor(data).to(device),
order="NCHW",
width_scale=2.0,
height_scale=1.5,
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_resize_nearest_op_cpu(self):
return self._test_resize_nearest_op("cpu")
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_resize_nearest_op_cuda(self):
return self._test_resize_nearest_op("cuda")
@given(input_data=hu.tensor(min_dim=2, max_dim=2))
def test_Fused8BitRowwiseQuantizedToFloat(self, input_data):
QuantizeOp = core.CreateOperator(
"FloatToFused8BitRowwiseQuantized", ["input_data"], ["quantized_data"]
)
workspace.FeedBlob("input_data", input_data)
workspace.RunOperatorOnce(QuantizeOp)
quantized_data = workspace.FetchBlob("quantized_data")
dequantized_data = torch.ops._caffe2.Fused8BitRowwiseQuantizedToFloat(
torch.tensor(quantized_data)
)
reference = fused_rowwise_8bit_quantize_dequantize_reference(input_data)
np.testing.assert_array_almost_equal(dequantized_data.numpy(), reference)
@given(binary_input=st.booleans())
def test_piecewise_linear_op(self, binary_input):
if binary_input:
num_dims = 1
else:
num_dims = 3
data = np.random.rand(1024, num_dims).astype(np.float32)
slopes = np.zeros(4 * num_dims).astype(np.float32)
bounds = np.sort(
np.random.rand(5, num_dims).astype(np.float32), axis=0
).flatten("F")
intercepts = np.random.rand(4 * num_dims).astype(np.float32)
def _piecewise_linear_ref(X):
ref_op = core.CreateOperator(
"PiecewiseLinearTransform",
["data", "bounds", "slopes", "intercepts"],
["calibrated"],
binary=binary_input,
)
workspace.FeedBlob("data", X)
workspace.FeedBlob("bounds", bounds)
workspace.FeedBlob("slopes", slopes)
workspace.FeedBlob("intercepts", intercepts)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("calibrated")
expected_output = _piecewise_linear_ref(data)
actual_output = torch.ops._caffe2.PiecewiseLinearTransform(
torch.tensor(data),
bounds.tolist(),
slopes.tolist(),
intercepts.tolist(),
binary_input,
)
torch.testing.assert_allclose(torch.tensor(expected_output), actual_output)
def test_alias_with_name_is_in_place(self):
device = "cuda" if workspace.has_cuda_support else "cpu"
x = torch.tensor([3., 42.]).to(device=device)
y = torch.ops._caffe2.AliasWithName(x, "new_name")
x[1] = 6
torch.testing.assert_allclose(x, torch.tensor([3., 6.]).to(device=device))
        # y should also change because y is an alias of x
torch.testing.assert_allclose(y, torch.tensor([3., 6.]).to(device=device))
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
def test_copy_between_cpu_and_gpu(self):
x_cpu_ref = torch.tensor([1., 2., 3.])
x_gpu_ref = x_cpu_ref.to("cuda")
x_gpu = torch.ops._caffe2.CopyCPUToGPU(x_cpu_ref)
torch.testing.assert_allclose(x_gpu, x_gpu_ref)
x_cpu = torch.ops._caffe2.CopyGPUToCPU(x_gpu)
torch.testing.assert_allclose(x_cpu, x_cpu_ref)
def test_index_hash_op(self):
data = np.random.randint(low=0, high=1000, size=(4, 4, 4))
def _index_hash_ref(X):
ref_op = core.CreateOperator("IndexHash", ["X"], ["Y"], seed=0, modulo=100)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _index_hash_ref(data)
actual_output = torch.ops._caffe2.IndexHash(
torch.tensor(data), seed=0, modulo=100
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_bucketize_op(self):
data = np.random.rand(8, 10).astype(np.float32) * 1000
boundaries = np.array([1, 10, 100, 1000, 100000]).astype(np.float32)
def _bucketize_ref(X):
ref_op = core.CreateOperator(
"Bucketize", ["X"], ["Y"], boundaries=boundaries
)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _bucketize_ref(data)
actual_output = torch.ops._caffe2.Bucketize(torch.tensor(data), boundaries)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
@given(X=hu.tensor(), eps=st.floats(min_value=1e-4, max_value=1e-2))
def test_logit(self, X, eps):
def ref(X, eps):
ref_op = core.CreateOperator("Logit", ["X"], ["Y"], eps=eps)
workspace.FeedBlob("X", X)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = ref(X, eps)
actual_output = torch.ops._caffe2.Logit(torch.tensor(X), eps)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_percentile(self):
original_values = np.array([[3.0, 5.0, 3], [5.0, 1.0, 6.0]]).astype(np.float32)
value_to_pct = np.array([[3, 0.2], [5, 0.5], [1, 0.3], [3, 0.6]]).astype(
np.float32
)
lengths = np.array([2, 1, 1]).astype(np.int32)
def _percentile_ref(original_values, value_to_pct, lengths):
ref_op = core.CreateOperator(
"Percentile", ["original_values", "value_to_pct", "lengths"], ["Y"]
)
workspace.FeedBlob("original_values", original_values)
workspace.FeedBlob("value_to_pct", value_to_pct)
workspace.FeedBlob("lengths", lengths)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _percentile_ref(original_values, value_to_pct, lengths)
actual_output = torch.ops._caffe2.Percentile(
torch.tensor(original_values),
torch.tensor(value_to_pct),
torch.tensor(lengths),
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_batch_bucket_one_hot_op(self):
data = np.array([[2, 3], [4, 1], [2, 5]]).astype(np.float32)
lengths = np.array([2, 3]).astype(np.int32)
boundaries = np.array([0.1, 2.5, 1, 3.1, 4.5]).astype(np.float32)
def _batch_bucket_one_hot_ref(data, lengths, boundaries):
ref_op = core.CreateOperator(
"BatchBucketOneHot", ["data", "lengths", "boundaries"], ["Y"]
)
workspace.FeedBlob("data", data)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("boundaries", boundaries)
workspace.RunOperatorOnce(ref_op)
return workspace.FetchBlob("Y")
expected_output = _batch_bucket_one_hot_ref(data, lengths, boundaries)
actual_output = torch.ops._caffe2.BatchBucketOneHot(
torch.tensor(data), torch.tensor(lengths), torch.tensor(boundaries)
)
torch.testing.assert_allclose(expected_output, actual_output.cpu())
def test_gather_ranges_to_dense_op(self):
data = np.array([1, 2, 3, 4, 5, 6, 7, 8])
ranges = np.array([[[2, 4]], [[0, 0]]])
key = np.array([0, 1, 3, 2, 1, 0, 1, 0])
lengths = np.array([4])
min_observation = 2
max_mismatched_ratio = 0.5
max_empty_ratio = 1.0
outputs_name = ["X_{}".format(i) for i in range(len(lengths))]
ref_op = core.CreateOperator(
"GatherRangesToDense",
["data", "ranges", "key"],
outputs_name,
lengths=lengths,
min_observation=min_observation,
max_mismatched_ratio=max_mismatched_ratio,
max_empty_ratio=max_empty_ratio,
)
workspace.FeedBlob("data", data)
workspace.FeedBlob("ranges", ranges)
workspace.FeedBlob("key", key)
workspace.RunOperatorOnce(ref_op)
ref_outputs = []
for output_name in outputs_name:
ref_outputs.append(workspace.FetchBlob(output_name))
outputs = torch.ops._caffe2.GatherRangesToDense(
torch.from_numpy(data),
torch.from_numpy(ranges),
torch.from_numpy(key),
lengths=lengths,
min_observation=min_observation,
max_mismatched_ratio=max_mismatched_ratio,
max_empty_ratio=max_empty_ratio,
)
self.assertEqual(len(ref_outputs), len(outputs))
for i in range(0, len(ref_outputs)):
np.testing.assert_array_almost_equal(ref_outputs[i], outputs[i].numpy())
@given(lengths_0=st.integers(1, 10), lengths_1=st.integers(1, 10))
@settings(deadline=10000)
def test_merge_id_lists(self, lengths_0, lengths_1):
def _merge_id_lists(lengths, values):
ref_op = core.CreateOperator(
"MergeIdLists",
["lengths_0", "values_0", "lengths_1", "values_1"],
["merged_lengths", "merged_values"],
)
workspace.FeedBlob("lengths_0", lengths[0])
workspace.FeedBlob("values_0", values[0])
workspace.FeedBlob("lengths_1", lengths[1])
workspace.FeedBlob("values_1", values[1])
workspace.RunOperatorOnce(ref_op)
return (
workspace.FetchBlob("merged_lengths"),
workspace.FetchBlob("merged_values"),
)
lengths = [
np.array([lengths_0]).astype(np.int32),
np.array([lengths_1]).astype(np.int32),
]
values = [
np.random.choice(np.arange(0, 10), size=lengths_0, replace=False).astype(
np.int32
),
np.random.choice(np.arange(10, 20), size=lengths_1, replace=False).astype(
np.int32
),
]
expected_merged_lengths, expected_merged_values = _merge_id_lists(
lengths, values
)
output_merged_lengths, output_merged_values = torch.ops._caffe2.MergeIdLists(
[
torch.tensor(lengths[0]),
torch.tensor(values[0]),
torch.tensor(lengths[1]),
torch.tensor(values[1]),
]
)
torch.testing.assert_allclose(expected_merged_lengths, output_merged_lengths)
torch.testing.assert_allclose(expected_merged_values, output_merged_values)
def test_learning_rate(self):
base_lr = 0.05
no_iter = torch.tensor([0])
one_iter = torch.tensor([1])
two_iter = torch.tensor([2])
# Fixed policy
self.assertEqual(
base_lr,
torch.ops._caffe2.LearningRate(
iterations=no_iter, base_lr=base_lr, policy="fixed"
),
)
self.assertEqual(
base_lr,
torch.ops._caffe2.LearningRate(
iterations=one_iter, base_lr=base_lr, policy="fixed"
),
)
# Step policy
gamma = 0.99
stepsize = 1
self.assertEqual(
base_lr,
torch.ops._caffe2.LearningRate(
iterations=no_iter,
base_lr=base_lr,
policy="step",
stepsize=stepsize,
gamma=gamma,
),
)
self.assertAlmostEqual(
base_lr * (gamma ** (1.0 / stepsize)),
torch.ops._caffe2.LearningRate(
iterations=one_iter,
base_lr=base_lr,
policy="step",
stepsize=stepsize,
gamma=gamma,
),
)
self.assertAlmostEqual(
base_lr * (gamma ** (2.0 / stepsize)),
torch.ops._caffe2.LearningRate(
iterations=two_iter,
base_lr=base_lr,
policy="step",
stepsize=stepsize,
gamma=gamma,
),
)
def test_pack_segments(self):
s = torch.rand(3, 3, 3)
lengths = torch.tensor([2, 1])
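        # lengths [2, 1] packs the 3 rows into 2 segments, padded to the max
        # segment length of 2, hence the (2, 2, 3, 3) packed shape.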
packed_tensor, _ = torch.ops._caffe2.PackSegments(lengths, s)
self.assertEqual(packed_tensor.numpy().shape, (2, 2, 3, 3))
unpacked_tensor = torch.ops._caffe2.UnpackSegments(lengths, packed_tensor)
torch.testing.assert_allclose(s, unpacked_tensor)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/torch_integration_test.py
|
from typing import List
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import bisect
import numpy as np
class TestBisectPercentileOp(hu.HypothesisTestCase):
def compare_reference(
self,
raw_data,
pct_raw_data,
pct_mapping,
pct_upper,
pct_lower,
lengths,
):
def bisect_percentile_op_ref(
raw_data,
pct_raw_data,
pct_mapping,
pct_lower,
pct_upper,
lengths
):
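            # Map every raw value to a percentile via piecewise-linear
            # interpolation between the per-feature (raw value, percentile)
            # anchor points.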
results = np.zeros_like(raw_data)
indices = [0]
for j in range(len(lengths)):
indices.append(indices[j] + lengths[j])
for i in range(len(raw_data)):
for j in range(len(raw_data[0])):
start = indices[j]
end = indices[j + 1]
val = raw_data[i][j]
pct_raw_data_i = pct_raw_data[start:end]
pct_lower_i = pct_lower[start:end]
pct_upper_i = pct_upper[start:end]
pct_mapping_i = pct_mapping[start:end]
# Corner cases
if val < pct_raw_data_i[0]:
results[i][j] = 0
continue
if val > pct_raw_data_i[-1]:
results[i][j] = 1.
continue
# interpolation
k = bisect.bisect_left(pct_raw_data_i, val)
if pct_raw_data_i[k] == val:
results[i][j] = pct_mapping_i[k]
else:
k = k - 1
slope = ((pct_lower_i[k + 1] - pct_upper_i[k])
/ (pct_raw_data_i[k + 1] - pct_raw_data_i[k]))
results[i][j] = pct_upper_i[k] + \
slope * (val - pct_raw_data_i[k])
return results
workspace.ResetWorkspace()
workspace.FeedBlob("raw_data", raw_data)
op = core.CreateOperator(
"BisectPercentile",
["raw_data"],
["pct_output"],
percentile_raw=pct_raw_data,
percentile_mapping=pct_mapping,
percentile_lower=pct_lower,
percentile_upper=pct_upper,
lengths=lengths
)
workspace.RunOperatorOnce(op)
expected_output = bisect_percentile_op_ref(
raw_data,
pct_raw_data,
pct_mapping,
pct_lower,
pct_upper,
lengths
)
output = workspace.blobs['pct_output']
np.testing.assert_array_almost_equal(output, expected_output)
def test_bisect_percentil_op_simple(self):
raw_data = np.array([
[1, 1],
[2, 2],
[3, 3],
[3, 1],
[9, 10],
[1.5, 5],
[1.32, 2.4],
[2.9, 5.7],
[-1, -1],
[3, 7]
], dtype=np.float32)
pct_raw_data = np.array([1, 2, 3, 2, 7], dtype=np.float32)
pct_lower = np.array([0.1, 0.2, 0.9, 0.1, 0.5], dtype=np.float32)
pct_upper = np.array([0.1, 0.8, 1.0, 0.4, 1.0], dtype=np.float32)
pct_mapping = np.array([0.1, 0.5, 0.95, 0.25, 0.75], dtype=np.float32)
lengths = np.array([3, 2], dtype=np.int32)
self.compare_reference(
raw_data, pct_raw_data, pct_mapping, pct_lower, pct_upper, lengths)
@given(
N=st.integers(min_value=20, max_value=100),
lengths_in=st.lists(
elements=st.integers(min_value=2, max_value=10),
min_size=2,
max_size=5,
),
max_value=st.integers(min_value=100, max_value=1000),
discrete=st.booleans(),
p=st.floats(min_value=0, max_value=0.9),
**hu.gcs_cpu_only
)
def test_bisect_percentil_op_large(
self, N: int, lengths_in: List[int], max_value: int, discrete: bool, p: float, gc, dc
):
lengths = np.array(lengths_in, dtype=np.int32)
D = len(lengths)
if discrete:
raw_data = np.random.randint(0, max_value, size=(N, D))
else:
raw_data = np.random.randn(N, D)
# To generate valid pct_lower and pct_upper
pct_lower = []
pct_upper = []
pct_raw_data = []
for i in range(D):
pct_lower_val = 0.
pct_upper_val = 0.
pct_lower_cur = []
pct_upper_cur = []
            # Make sure there are no duplicate values in pct_raw_data
if discrete:
pct_raw_data_cur = np.random.choice(
np.arange(max_value), size=lengths[i], replace=False)
else:
pct_raw_data_cur = np.random.randn(lengths[i])
while len(set(pct_raw_data_cur)) < lengths[i]:
pct_raw_data_cur = np.random.randn(lengths[i])
pct_raw_data_cur = np.sort(pct_raw_data_cur)
for _ in range(lengths[i]):
pct_lower_val = pct_upper_val + 0.01
pct_lower_cur.append(pct_lower_val)
pct_upper_val = pct_lower_val + \
0.01 * np.random.randint(1, 20) * (np.random.uniform() < p)
pct_upper_cur.append(pct_upper_val)
# normalization
pct_lower_cur = np.array(pct_lower_cur, np.float32) / pct_upper_val
pct_upper_cur = np.array(pct_upper_cur, np.float32) / pct_upper_val
pct_lower.extend(pct_lower_cur)
pct_upper.extend(pct_upper_cur)
pct_raw_data.extend(pct_raw_data_cur)
pct_lower = np.array(pct_lower, dtype=np.float32)
pct_upper = np.array(pct_upper, dtype=np.float32)
pct_mapping = (pct_lower + pct_upper) / 2.
raw_data = np.array(raw_data, dtype=np.float32)
pct_raw_data = np.array(pct_raw_data, dtype=np.float32)
self.compare_reference(
raw_data, pct_raw_data, pct_mapping, pct_lower, pct_upper, lengths)
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/bisect_percentile_op_test.py
|
from caffe2.python import core, workspace
from hypothesis import given, assume, settings
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import unittest
class TestElementwiseOps(hu.HypothesisTestCase):
@given(X=hu.tensor(dtype=np.float32), **hu.gcs)
@settings(deadline=10000)
def test_abs(self, X, gc, dc):
op = core.CreateOperator(
"Abs",
["X"],
["Y"],
)
def abs_ref(X):
return [np.absolute(X)]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=abs_ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0], ensure_outputs_are_inferred=True)
@given(X=hu.tensor(dtype=np.float32), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_exp(self, X, inplace, gc, dc):
op = core.CreateOperator(
"Exp",
["X"],
["X"] if inplace else ["Y"],
)
def exp_ref(X):
return [np.exp(X)]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=exp_ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0], ensure_outputs_are_inferred=True)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_log(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32) + 1.0
def log_op(X):
return [np.log(X)]
op = core.CreateOperator(
"Log",
["X"],
["Z"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=log_op,
ensure_outputs_are_inferred=True,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2,
ensure_outputs_are_inferred=True)
@given(n=st.integers(0, 10), m=st.integers(4, 6),
d=st.integers(2, 3), seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_powt(self, n, m, d, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m, d).astype(np.float32) + 1.0
Y = np.random.rand(n, m, d).astype(np.float32) + 2.0
def powt_op(X, Y):
return [np.power(X, Y)]
        # Two gradients: dZ/dX = Y * X^(Y-1) and dZ/dY = X^Y * ln(X)
def powt_grad(g_out, outputs, fwd_inputs):
[X, Y] = fwd_inputs
Z = outputs[0]
return ([Y * np.power(X, Y - 1), Z * np.log(X)] * g_out)
op = core.CreateOperator(
"Pow",
["X", "Y"],
["Z"]
)
self.assertReferenceChecks(device_option=gc,
op=op,
inputs=[X, Y],
reference=powt_op,
output_to_grad="Z",
grad_reference=powt_grad,
ensure_outputs_are_inferred=True)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_sqr(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32)
def sqr_op(X):
return [np.square(X)]
op = core.CreateOperator(
"Sqr",
["X"],
["Z"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sqr_op,
ensure_outputs_are_inferred=True,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2,
ensure_outputs_are_inferred=True)
@given(
X=hu.tensor(
elements=hu.floats(min_value=0.1, max_value=10),
# allow empty tensor
min_value=0),
inplace=st.booleans(),
**hu.gcs
)
@settings(deadline=10000)
def test_sqrt(self, X, inplace, gc, dc):
def sqrt_op(X):
return [np.sqrt(X)]
op = core.CreateOperator(
"Sqrt",
["X"],
["X"] if inplace else ["Y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sqrt_op,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
        # The stepsize needs to be smaller than the smallest possible X so
        # that the sqrt stays well defined during numerical differentiation
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-2, ensure_outputs_are_inferred=True)
@given(X=hu.tensor(dtype=np.float32), inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_softsign(self, X, inplace, gc, dc):
op = core.CreateOperator(
"Softsign",
["X"],
["X"] if inplace else ["Y"],
)
def softsign_ref(X):
return [X / (1.0 + np.absolute(X))]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=softsign_ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
if not inplace:
self.assertGradientChecks(
gc, op, [X], 0, [0],
ensure_outputs_are_inferred=True,
)
@given(X=hu.tensor(elements=hu.floats(min_value=0.1, max_value=10.0), dtype=np.float32),
inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_rsqrt(self, X, inplace, gc, dc):
op = core.CreateOperator(
"Rsqrt",
["X"],
["X"] if inplace else ["Y"],
)
def rsqrt_ref(X):
return [1.0 / np.sqrt(X)]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=rsqrt_ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=5e-3,
ensure_outputs_are_inferred=True,
)
@given(X=hu.tensor(dtype=np.float32), **hu.gcs)
@settings(deadline=10000)
def test_cube(self, X, gc, dc):
op = core.CreateOperator(
"Cube",
["X"],
["Y"],
)
def cube_ref(X):
return [np.power(X, 3)]
def cube_grad_ref(g_out, outputs, fwd_inputs):
dY = g_out
[X] = fwd_inputs
return [dY * np.square(X) * 3]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=cube_ref,
output_to_grad="Y",
grad_reference=cube_grad_ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
@given(X=hu.tensor(dtype=np.float32), in_place=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_cbrt(self, X, in_place, gc, dc):
op = core.CreateOperator(
"Cbrt",
["X"],
["X"] if in_place else ["Y"],
)
def cbrt_ref(X):
return [np.cbrt(X)]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=cbrt_ref,
ensure_outputs_are_inferred=True,
)
@given(X=hu.tensor(elements=hu.floats(min_value=1.0, max_value=10.0), dtype=np.float32),
in_place=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_cbrt_grad(self, X, in_place, gc, dc):
op = core.CreateOperator(
"Cbrt",
["X"],
["X"] if in_place else ["Y"],
)
self.assertGradientChecks(
gc, op, [X], 0, [0],
ensure_outputs_are_inferred=True,
)
self.assertGradientChecks(
gc, op, [-X], 0, [0],
ensure_outputs_are_inferred=True,
)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_swish(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32)
def swish(X):
return [np.divide(X, (1. + np.exp(-X)))]
op = core.CreateOperator(
"Swish",
["X"],
["Z"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=swish,
ensure_outputs_are_inferred=True,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2,
ensure_outputs_are_inferred=True)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_swish_gradient_inplace(self, n, m, gc, dc, seed):
np.random.seed(seed)
def swish(X):
return [np.divide(X, (1. + np.exp(-X)))]
def swish_gradient(X, Y, dY):
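            # With Y = swish(X) = X * sigmoid(X), the derivative is
            # Y + sigmoid(X) * (1 - Y).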
return [dY * (Y + np.divide(1. - Y, 1. + np.exp(-X)))]
X = np.random.rand(n, m).astype(np.float32)
Y = swish(X)[0]
dY = np.random.rand(n, m).astype(np.float32)
op = core.CreateOperator(
"SwishGradient",
["X", "Y", "grad"],
"grad"
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y, dY],
reference=swish_gradient,
)
@given(n=st.integers(1, 6),
m=st.integers(4, 6),
inplace=st.booleans(),
allow_broadcast_fastpath=st.booleans(),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_mul_gradient_inplace_or_broadcast(
self,
n: int,
m: int,
inplace: bool,
allow_broadcast_fastpath: bool,
gc,
dc,
seed: int,
):
broadcast = not inplace
np.random.seed(seed)
def mul_gradient(dC, A, B):
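            # dA = B * dC and dB = A * dC; when B was broadcast over the batch
            # dimension, its gradient is reduced by summing over that dimension.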
dA = B * dC
dB = A * dC
if broadcast:
dB = np.sum(dB, axis=0)
return [dA, dB]
A = np.random.rand(n, m).astype(np.float32)
if broadcast:
B = np.random.rand(m).astype(np.float32)
else:
B = np.random.rand(n, m).astype(np.float32)
dC = np.random.rand(n, m).astype(np.float32)
op_dA_inplace = core.CreateOperator(
"MulGradient",
["dC", "A", "B"],
["dC" if inplace else "dA", "dB"],
allow_broadcast_fastpath=allow_broadcast_fastpath,
)
op_dB_inplace = core.CreateOperator(
"MulGradient",
["dC", "A", "B"],
["dA", "dC" if inplace else "dB"],
allow_broadcast_fastpath=allow_broadcast_fastpath,
)
self.assertReferenceChecks(
device_option=gc,
op=op_dA_inplace,
inputs=[dC, A, B],
reference=mul_gradient,
)
self.assertReferenceChecks(
device_option=gc,
op=op_dB_inplace,
inputs=[dC, A, B],
reference=mul_gradient,
)
@given(n=st.integers(1, 6),
m=st.integers(4, 6),
inplace=st.booleans(),
allow_broadcast_fastpath=st.booleans(),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_div_gradient_inplace_or_broadcast(
self,
n: int,
m: int,
inplace: bool,
allow_broadcast_fastpath: bool,
gc,
dc,
seed: int,
):
broadcast = not inplace
np.random.seed(seed)
def div_gradient(dC, _A, B, C):
dA = dC / B
dB = -dC * C / B
if broadcast:
dB = np.sum(dB, axis=0)
return [dA, dB]
A = np.random.rand(n, m).astype(np.float32)
if broadcast:
B = np.random.rand(m).astype(np.float32) + 1.0
else:
B = np.random.rand(n, m).astype(np.float32) + 1.0
C = A / B
dC = np.random.rand(n, m).astype(np.float32)
op = core.CreateOperator(
"DivGradient",
["dC", "A", "B", "C"],
["dC" if inplace else "dA", "dB"],
allow_broadcast_fastpath=allow_broadcast_fastpath,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[dC, A, B, C],
reference=div_gradient,
)
@given(n=st.integers(1, 6),
m=st.integers(4, 6),
inplace=st.booleans(),
allow_broadcast_fastpath=st.booleans(),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_add_gradient_inplace_or_broadcast(
self,
n: int,
m: int,
inplace: bool,
allow_broadcast_fastpath: bool,
gc,
dc,
seed: int,
):
broadcast = not inplace
np.random.seed(seed)
def add_gradient(dC, _A, _B):
dA, dB = dC, dC
if broadcast:
dB = np.sum(dB, axis=0)
return [dA, dB]
A = np.random.rand(n, m).astype(np.float32)
if broadcast:
B = np.random.rand(m).astype(np.float32)
else:
B = np.random.rand(n, m).astype(np.float32)
dC = np.random.rand(n, m).astype(np.float32)
op_dA_inplace = core.CreateOperator(
"AddGradient",
["dC", "A", "B"],
["dC" if inplace else "dA", "dB"],
allow_broadcast_fastpath=allow_broadcast_fastpath,
)
op_dB_inplace = core.CreateOperator(
"AddGradient",
["dC", "A", "B"],
["dA", "dC" if inplace else "dB"],
allow_broadcast_fastpath=allow_broadcast_fastpath,
)
self.assertReferenceChecks(
device_option=gc,
op=op_dA_inplace,
inputs=[dC, A, B],
reference=add_gradient,
)
self.assertReferenceChecks(
device_option=gc,
op=op_dB_inplace,
inputs=[dC, A, B],
reference=add_gradient,
)
@given(n=st.integers(1, 6),
m=st.integers(4, 6),
inplace=st.booleans(),
allow_broadcast_fastpath=st.booleans(),
seed=st.integers(0, 1000), **hu.gcs)
@settings(deadline=10000)
def test_sub_gradient_inplace_or_broadcast(
self,
n: int,
m: int,
inplace: bool,
allow_broadcast_fastpath: bool,
gc,
dc,
seed: int,
):
broadcast = not inplace
np.random.seed(seed)
def sub_gradient(dC, _A, _B):
dA, dB = dC, -dC
if broadcast:
dB = np.sum(dB, axis=0)
return [dA, dB]
A = np.random.rand(n, m).astype(np.float32)
if broadcast:
B = np.random.rand(m).astype(np.float32)
else:
B = np.random.rand(n, m).astype(np.float32)
dC = np.random.rand(n, m).astype(np.float32)
op_dA_inplace = core.CreateOperator(
"SubGradient",
["dC", "A", "B"],
["dC" if inplace else "dA", "dB"],
allow_broadcast_fastpath=allow_broadcast_fastpath,
)
op_dB_inplace = core.CreateOperator(
"SubGradient",
["dC", "A", "B"],
["dA", "dC" if inplace else "dB"],
allow_broadcast_fastpath=allow_broadcast_fastpath,
)
self.assertReferenceChecks(
device_option=gc,
op=op_dA_inplace,
inputs=[dC, A, B],
reference=sub_gradient,
)
self.assertReferenceChecks(
device_option=gc,
op=op_dB_inplace,
inputs=[dC, A, B],
reference=sub_gradient,
)
@given(X=hu.tensor(dtype=np.float32), inplace=st.booleans(),
engine=st.sampled_from(["", "CUDNN"]), **hu.gcs)
@settings(deadline=10000)
def test_sigmoid(self, X, inplace, engine, gc, dc):
op = core.CreateOperator(
"Sigmoid",
["X"],
["X"] if inplace else ["Y"],
engine=engine,
)
def sigmoid_ref(X):
return [1.0 / (1.0 + np.exp(-X))]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sigmoid_ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0], ensure_outputs_are_inferred=True)
@given(X=hu.tensor(dtype=np.float32), inplace=st.booleans(),
engine=st.sampled_from(["", "CUDNN"]), **hu.gcs)
@settings(deadline=10000)
def test_tanh(self, X, inplace, engine, gc, dc):
op = core.CreateOperator(
"Tanh",
["X"],
["X"] if inplace else ["Y"],
engine=engine,
)
def tanh_ref(X):
return [np.tanh(X)]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=tanh_ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0], ensure_outputs_are_inferred=True)
@given(X=hu.tensor(dtype=np.float32),
inplace=st.booleans(),
alpha=hu.floats(min_value=-100.0, max_value=100.0),
beta=hu.floats(min_value=-100.0, max_value=100.0),
engine=st.sampled_from([""]),
**hu.gcs)
@settings(deadline=10000)
def test_hard_sigmoid(self, X, inplace, alpha, beta, engine, gc, dc):
        # Prevent alpha and beta from both being 0 to avoid a division
        # error when adjusting the inputs below
assume(alpha != 0.0 or beta != 0.0)
op = core.CreateOperator(
"HardSigmoid",
["X"],
["X"] if inplace else ["Y"],
alpha=alpha,
beta=beta,
engine=engine,
)
def hard_sigmoid_ref(X):
return [np.minimum(1.0, np.maximum(0.0, X * alpha + beta))]
        # Adjust inputs to avoid differentiating at inflection points
if abs(alpha) > 0.001:
Y = X * alpha + beta
Y += 0.04 * np.sign(Y)
Y[Y == 0.0] += 0.1
Y[Y == 1.0] -= 0.1
X = (Y - beta) / alpha
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=hard_sigmoid_ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2,
ensure_outputs_are_inferred=True)
@given(n=st.integers(0, 6), m=st.integers(4, 6), **hu.gcs)
@settings(deadline=10000)
def test_eq(self, n, m, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.randint(2, size=(n, m))
Y = np.random.randint(2, size=(n, m))
op = core.CreateOperator("EQ", ["X", "Y"], "out", broadcast=1)
def eq(X, Y):
return [X == Y]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y],
reference=eq,
ensure_outputs_are_inferred=True,
)
workspace.FeedBlob('X', X)
workspace.FeedBlob('Y', Y)
net = core.Net("batch_bucket_one_hot_test")
result = net.EQ(["X", "Y"], 1)
(shapes, types) = workspace.InferShapesAndTypes([net])
workspace.RunNetOnce(net)
self.assertEqual(shapes[result], list(workspace.blobs[result].shape))
self.assertEqual(shapes[result], list(X.shape))
self.assertEqual(types[result], core.DataType.BOOL)
@given(n=st.integers(0, 6), m=st.integers(4, 6), **hu.gcs)
@settings(deadline=10000)
def test_eq_bcast(self, n, m, gc, dc):
# Set broadcast and no axis, i.e. broadcasting last dimensions.
X = np.random.randint(2, size=(n, m))
Y = np.random.randint(2, size=(m,))
op = core.CreateOperator("EQ", ["X", "Y"], "out", broadcast=1)
def eq(X, Y):
return [X == Y]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y],
reference=eq,
ensure_outputs_are_inferred=True,
)
workspace.FeedBlob('X', X)
workspace.FeedBlob('Y', Y)
net = core.Net("eq_bast")
result = net.EQ(["X", "Y"], 1, broadcast=1)
(shapes, types) = workspace.InferShapesAndTypes([net])
workspace.RunNetOnce(net)
self.assertTrue(str(result) in shapes)
self.assertEqual(shapes[result], list(workspace.blobs[result].shape))
self.assertEqual(shapes[result], list(X.shape))
self.assertEqual(types[result], core.DataType.BOOL)
        net_2 = core.Net("eq_bcast_invalid")
result_2 = net_2.EQ(["X", "Y"], 1)
(shapes, types) = workspace.InferShapesAndTypes([net])
self.assertTrue(str(result_2) not in shapes)
def _run_single_test(
self, op, ref, A, B, reverse_inputs, test_grad, gc, dc):
inputs = [A, B]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, inputs, [0])
if test_grad:
for i in range(len(inputs)):
self.assertGradientChecks(
gc, op, inputs, i, [0],
ensure_outputs_are_inferred=True,
)
if reverse_inputs:
inputs = [B, A]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=ref,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, inputs, [0])
if test_grad:
for i in range(len(inputs)):
self.assertGradientChecks(
gc, op, inputs, i, [0],
ensure_outputs_are_inferred=True,
)
def _test_binary_op(
self, op_name, np_ref, n, m, k, t, bias, test_grad, gc, dc):
op = core.CreateOperator(
op_name,
["A", "B"],
["C"],
)
def ref(A, B):
return [np_ref(A, B)]
A = np.random.rand(n, m, k, t).astype(np.float32) + bias
B = np.random.rand(n, m, k, t).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, True, test_grad, gc, dc)
A = np.random.rand(1).astype(np.float32) + bias
B = np.random.rand(n, m, k, t).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, True, test_grad, gc, dc)
A = np.random.rand(k, t).astype(np.float32) + bias
B = np.random.rand(n, m, k, t).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, True, test_grad, gc, dc)
A = np.random.rand(n, m, 1, 1).astype(np.float32) + bias
B = np.random.rand(n, m, k, t).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, True, test_grad, gc, dc)
A = np.random.rand(1, m, k, 1).astype(np.float32) + bias
B = np.random.rand(n, m, k, t).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, True, test_grad, gc, dc)
A = np.random.rand(m, 1, t).astype(np.float32) + bias
B = np.random.rand(n, m, k, t).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, True, test_grad, gc, dc)
A = np.random.rand(1, m, 1, t).astype(np.float32) + bias
B = np.random.rand(n, 1, k, 1).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, True, test_grad, gc, dc)
def _test_binary_op_in_place(
self, op_name, np_ref, n, m, bias, test_grad, in_place_2nd, gc, dc):
def ref(A, B):
return [np_ref(A, B)]
op = core.CreateOperator(
op_name,
["A", "B"],
["A"],
)
A = np.random.rand(n, m).astype(np.float32) + bias
B = np.random.rand(m).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, False, test_grad, gc, dc)
A = np.random.rand(n, m).astype(np.float32) + bias
B = np.random.rand(n, 1).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, False, test_grad, gc, dc)
if in_place_2nd:
op = core.CreateOperator(
op_name,
["A", "B"],
["B"],
)
A = np.random.rand(m).astype(np.float32) + bias
B = np.random.rand(n, m).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, False, test_grad, gc, dc)
A = np.random.rand(n, 1).astype(np.float32) + bias
B = np.random.rand(n, m).astype(np.float32) + bias
self._run_single_test(op, ref, A, B, False, test_grad, gc, dc)
@given(n=st.integers(0, 5), m=st.integers(0, 5), k=st.integers(0, 5),
t=st.integers(0, 5), **hu.gcs)
@settings(deadline=None, max_examples=50)
def test_add(self, n, m, k, t, gc, dc):
self._test_binary_op("Add", np.add, n, m, k, t, -0.5, True, gc, dc)
self._test_binary_op_in_place(
"Add", np.add, n, m, -0.5, True, True, gc, dc)
@given(n=st.integers(0, 5), m=st.integers(0, 5), k=st.integers(0, 5),
t=st.integers(0, 5), **hu.gcs)
@settings(deadline=None, max_examples=50)
def test_sub(self, n, m, k, t, gc, dc):
self._test_binary_op("Sub", np.subtract, n, m,
k, t, -0.5, True, gc, dc)
self._test_binary_op_in_place(
"Sub", np.subtract, n, m, -0.5, True, True, gc, dc)
@given(n=st.integers(0, 5), m=st.integers(0, 5), k=st.integers(0, 5),
t=st.integers(0, 5), **hu.gcs)
@settings(deadline=None, max_examples=50)
def test_mul(self, n, m, k, t, gc, dc):
self._test_binary_op("Mul", np.multiply, n, m,
k, t, -0.5, True, gc, dc)
@given(n=st.integers(0, 5), m=st.integers(0, 5), k=st.integers(0, 5),
t=st.integers(0, 5), **hu.gcs)
@settings(deadline=None, max_examples=50)
def test_div(self, n, m, k, t, gc, dc):
self._test_binary_op("Div", np.divide, n, m, k, t, 1.0, True, gc, dc)
self._test_binary_op_in_place(
"Div", np.divide, n, m, 1.0, True, False, gc, dc)
@given(n=st.integers(1, 5), m=st.integers(1, 5), broadcast=st.booleans(),
allow_broadcast_fastpath=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_div_legacy_grad(
self,
n: int,
m: int,
broadcast: bool,
allow_broadcast_fastpath: bool,
gc,
dc
):
op = core.CreateOperator(
"DivGradient",
["B", "C", "dC"],
["dA", "dB"],
allow_broadcast_fastpath=allow_broadcast_fastpath,
)
def div_grad_ref(B, C, dC):
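            # For C = A / B: dA = dC / B and dB = -dC * A / B^2 = -dC * C / B.
            # When B was broadcast, its gradient is summed over axis 0.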
dA = dC / B
dB = -dC * C / B
if broadcast:
dB = np.sum(dB, axis=0)
return [dA, dB]
if broadcast:
B = np.random.rand(m).astype(np.float32) + 1.0
else:
B = np.random.rand(n, m).astype(np.float32) + 1.0
C = np.random.randn(n, m).astype(np.float32)
dC = np.random.randn(n, m).astype(np.float32)
inputs = [B, C, dC]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=div_grad_ref,
)
self.assertDeviceChecks(dc, op, inputs, [0, 1])
def _test_bitwise_binary_op(self, op_name, np_ref, n, m, k, t, gc, dc):
op = core.CreateOperator(
op_name,
["A", "B"],
["C"],
)
def ref(A, B):
return [np_ref(A, B)]
A = np.random.randint(128, size=(n, m, k, t))
B = np.random.randint(128, size=(n, m, k, t))
self._run_single_test(op, ref, A, B, True, False, gc, dc)
A = np.random.randint(128, size=1)
B = np.random.randint(128, size=(n, m, k, t))
self._run_single_test(op, ref, A, B, True, False, gc, dc)
A = np.random.randint(128, size=(k, t))
B = np.random.randint(128, size=(n, m, k, t))
self._run_single_test(op, ref, A, B, True, False, gc, dc)
A = np.random.randint(128, size=(n, m, 1, 1))
B = np.random.randint(128, size=(n, m, k, t))
self._run_single_test(op, ref, A, B, True, False, gc, dc)
A = np.random.randint(128, size=(1, m, k, 1))
B = np.random.randint(128, size=(n, m, k, t))
self._run_single_test(op, ref, A, B, True, False, gc, dc)
A = np.random.randint(128, size=(m, 1, t))
B = np.random.randint(128, size=(n, m, k, t))
self._run_single_test(op, ref, A, B, True, False, gc, dc)
A = np.random.randint(128, size=(1, m, 1, t))
B = np.random.randint(128, size=(n, 1, k, 1))
self._run_single_test(op, ref, A, B, True, False, gc, dc)
@given(n=st.integers(1, 5), m=st.integers(1, 5), k=st.integers(1, 5),
t=st.integers(1, 5), **hu.gcs)
@settings(deadline=10000)
def test_bitwise_and(self, n, m, k, t, gc, dc):
self._test_bitwise_binary_op(
"BitwiseAnd", np.bitwise_and, n, m, k, t, gc, dc)
@given(n=st.integers(1, 5), m=st.integers(1, 5), k=st.integers(1, 5),
t=st.integers(1, 5), **hu.gcs)
@settings(deadline=10000)
def test_bitwise_or(self, n, m, k, t, gc, dc):
self._test_bitwise_binary_op(
"BitwiseOr", np.bitwise_or, n, m, k, t, gc, dc)
@given(n=st.integers(1, 5), m=st.integers(1, 5), k=st.integers(1, 5),
t=st.integers(1, 5), **hu.gcs)
@settings(deadline=10000)
def test_bitwise_xor(self, n, m, k, t, gc, dc):
self._test_bitwise_binary_op(
"BitwiseXor", np.bitwise_xor, n, m, k, t, gc, dc)
@given(X=hu.tensor(elements=hu.floats(min_value=0.5, max_value=2), dtype=np.float32),
inplace=st.booleans(), **hu.gcs)
@settings(deadline=10000)
def test_reciprocal(self, X, inplace, gc, dc):
def reciprocal_op(X):
return [np.reciprocal(X)]
op = core.CreateOperator(
"Reciprocal",
["X"],
["X"] if inplace else ["Y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=reciprocal_op,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-3, threshold=0.05,
ensure_outputs_are_inferred=True)
@given(X=hu.tensor(dtype=np.bool), **hu.gcs)
@settings(deadline=10000)
def test_not(self, X, gc, dc):
def not_op(X):
return [np.logical_not(X)]
op = core.CreateOperator(
"Not",
["X"],
["Y"],
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=not_op,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
@given(X=hu.tensor(dtype=np.float32), **hu.gcs)
@settings(deadline=10000)
def test_log1p(self, X, gc, dc):
op = core.CreateOperator(
"Log1p",
["X"],
["Y"]
)
def ref_log1p(input):
result = np.log1p(input)
return (result,)
def ref_log1p_grad(g_out, outputs, fwd_inputs):
result = g_out / (fwd_inputs[0] + 1)
return (result,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=ref_log1p,
output_to_grad="Y",
grad_reference=ref_log1p_grad,
ensure_outputs_are_inferred=True,
)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/elementwise_ops_test.py
|
import unittest
import caffe2.python.hypothesis_test_util as hu
import numpy as np
from caffe2.python import core, workspace
def update_counter_ref(prev_iter, update_counter, indices, curr_iter, counter_halflife):
prev_iter_out = prev_iter.copy()
update_counter_out = update_counter.copy()
counter_neg_log_rho = np.log(2) / counter_halflife
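    # exp(-iter_diff * counter_neg_log_rho) == 2 ** (-iter_diff / counter_halflife),
    # so the previous count decays by half every counter_halflife iterations
    # before the +1 increment for the current update.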
for i in indices:
iter_diff = curr_iter[0] - prev_iter_out[i]
prev_iter_out[i] = curr_iter[0]
update_counter_out[i] = (
1.0 + np.exp(-iter_diff * counter_neg_log_rho) * update_counter_out[i]
)
return prev_iter_out, update_counter_out
class TestRowWiseCounter(hu.HypothesisTestCase):
def test_rowwise_counter(self):
h = 8 * 20
n = 5
curr_iter = np.array([100], dtype=np.int64)
update_counter = np.random.randint(99, size=h).astype(np.float64)
prev_iter = np.random.rand(h, 1).astype(np.int64)
indices = np.unique(np.random.randint(0, h, size=n))
indices.sort(axis=0)
counter_halflife = 1
net = core.Net("test_net")
net.Proto().type = "dag"
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("curr_iter", curr_iter)
workspace.FeedBlob("update_counter", update_counter)
workspace.FeedBlob("prev_iter", prev_iter)
net.RowWiseCounter(
["prev_iter", "update_counter", "indices", "curr_iter"],
["prev_iter", "update_counter"],
counter_halflife=counter_halflife,
)
workspace.RunNetOnce(net)
prev_iter_out = workspace.FetchBlob("prev_iter")
update_counter_out = workspace.FetchBlob("update_counter")
prev_iter_out_ref, update_counter_out_ref = update_counter_ref(
prev_iter,
update_counter,
indices,
curr_iter,
counter_halflife=counter_halflife,
)
assert np.allclose(prev_iter_out, prev_iter_out_ref, rtol=1e-3)
assert np.allclose(update_counter_out, update_counter_out_ref, rtol=1e-3)
if __name__ == "__main__":
global_options = ["caffe2"]
core.GlobalInit(global_options)
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/rowwise_counter_test.py
|
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
class TestEnforceFinite(hu.HypothesisTestCase):
@given(
X=hu.tensor(
# allow empty
min_value=0,
elements=hu.floats(allow_nan=True, allow_infinity=True),
),
**hu.gcs
)
@settings(deadline=10000)
def test_enforce_finite(self, X, gc, dc):
def all_finite_value(X):
if X.size <= 0:
return True
return np.isfinite(X).all()
net = core.Net('test_net')
net.Const(array=X, blob_out="X")
net.EnforceFinite("X", [])
if all_finite_value(X):
self.assertTrue(workspace.RunNetOnce(net))
else:
with self.assertRaises(RuntimeError):
workspace.RunNetOnce(net)
@given(
X=hu.tensor(
elements=hu.floats(min_value=0, max_value=10, allow_nan=False, allow_infinity=False),
),
**hu.gcs
)
def test_enforce_finite_device_check(self, X, gc, dc):
op = core.CreateOperator(
"EnforceFinite",
["X"],
[],
)
self.assertDeviceChecks(dc, op, [X], [])
|
pytorch-master
|
caffe2/python/operator_test/enforce_finite_op_test.py
|
from caffe2.python import brew, core, workspace
from caffe2.python.model_helper import ModelHelper
from functools import partial
from hypothesis import given, settings
from typing import Optional, Tuple
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import torch
import unittest
def _layer_norm_ref(axis, epsilon, X):
left = int(np.prod(X.shape[:axis]))
reshaped = np.reshape(X, [left, -1])
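    # Normalize over all dimensions at and after `axis`: each row of the
    # flattened [left, right] view gets Y = (X - E[X]) / sqrt(Var[X] + epsilon).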
mean = np.mean(reshaped, axis=1).reshape([left, 1])
std = np.sqrt(np.mean(np.square(reshaped), axis=1).reshape(
[left, 1]) - np.square(mean) + epsilon)
Y = (reshaped - mean) / (std)
Y = np.reshape(Y, X.shape)
mean = np.reshape(mean, X.shape[:axis] + (1,))
std = np.reshape(std, X.shape[:axis] + (1,))
return (Y, mean, std)
def _layer_norm_with_affine_ref(axis, epsilon, X, gamma, beta):
Y, mean, std = _layer_norm_ref(axis, epsilon, X)
Y = Y * gamma + beta
return (Y, mean, std)
def _layer_norm_grad_ref(axis, gout_full, norm, mean_full, stdev_full, X_full):
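    # Backpropagate through Y = (X - mean) / std: dX is the direct term (dx_end)
    # plus the contributions flowing through std (dx_stdev) and mean (dxmean).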
left = int(np.prod(X_full.shape[:axis]))
right = int(np.prod(X_full.shape[axis:]))
X = np.reshape(X_full, [left, right])
stdev = np.reshape(stdev_full, [left, 1])
mean = np.reshape(mean_full, [left, 1])
gout = np.reshape(gout_full, [left, right])
dstdev_end = (-1.0) / np.power(stdev, 2.0) \
* np.sum((X - mean) * gout, axis=1).reshape([left, 1])
dmean_end = np.sum(-1.0 / stdev * gout, axis=1).reshape([left, 1])
dx_end = 1.0 / stdev * gout
# stdev block
dmean_stdev = -1.0 * mean / stdev * dstdev_end
dx_stdev = X / (right * stdev) * dstdev_end
# mean block
dmean = dmean_end + dmean_stdev
dxmean = (1.0 / right) * dmean
# final outputs
dx = dx_end + dx_stdev + dxmean
dx = dx.reshape(X_full.shape)
return [dx]
class TestLayerNormOp(serial.SerializedTestCase):
@given(X=hu.tensor(min_dim=2), **hu.gcs)
@settings(deadline=10000)
def test_layer_norm_grad_op(self, X, gc, dc):
axis = np.random.randint(0, len(X.shape))
epsilon = 1e-4
op = core.CreateOperator(
"LayerNormGradient",
["gout", "out", "mean", "stdev", "in"],
["gin"],
axis=axis,
epsilon=epsilon,
)
norm, mean, stdev = _layer_norm_ref(axis, epsilon, X)
gout = norm
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[gout, norm, mean, stdev, X],
reference=partial(_layer_norm_grad_ref, axis)
)
self.assertDeviceChecks(
device_options=dc,
op=op,
inputs=[gout, norm, mean, stdev, X],
outputs_to_check=[0],
)
@given(X=hu.tensor(min_dim=2),
eps=st.floats(1e-5, 1e-3),
elementwise_affine=st.booleans(),
**hu.gcs)
def test_layer_norm_op(self, X, eps, elementwise_affine, gc, dc):
axis = np.random.randint(0, len(X.shape))
op = core.CreateOperator(
"LayerNorm",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "std"],
axis=axis,
epsilon=eps,
elementwise_affine=elementwise_affine,
)
if elementwise_affine:
ref = partial(_layer_norm_with_affine_ref, axis, eps)
else:
ref = partial(_layer_norm_ref, axis, eps)
if elementwise_affine:
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
inputs = [X, gamma, beta]
else:
inputs = [X]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=ref,
)
self.assertDeviceChecks(
device_options=dc,
op=op,
inputs=inputs,
outputs_to_check=[0, 1, 2],
)
@given(M=st.integers(1, 10),
N=st.integers(10, 20),
axis=st.integers(0, 1),
eps=st.floats(1e-5, 1e-3),
elementwise_affine=st.booleans(),
**hu.gcs)
@settings(deadline=10000)
def test_layer_norm_grad(
self, M, N, axis, eps, elementwise_affine, gc, dc):
op = core.CreateOperator(
"LayerNorm",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "std"],
axis=axis,
epsilon=eps,
elementwise_affine=elementwise_affine,
)
X = np.arange(M * N).astype(np.float32)
np.random.shuffle(X)
X = X.reshape((M, N))
if elementwise_affine:
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
inputs = [X, gamma, beta]
else:
inputs = [X]
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
@unittest.skipIf(workspace.has_hip_support,
"Operator cross-calling doesn't work with hip yet")
@given(X=hu.tensor(min_dim=2),
eps=st.floats(1e-5, 1e-3),
elementwise_affine=st.booleans(),
**hu.gcs)
@settings(deadline=10000)
def test_layer_norm_op_c10(self, X, eps, elementwise_affine, gc, dc):
axis = np.random.randint(0, len(X.shape))
op = core.CreateOperator(
"C10LayerNorm_DontUseThisOpYet",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "std"],
axis=axis,
epsilon=eps,
elementwise_affine=elementwise_affine,
)
if elementwise_affine:
ref = partial(_layer_norm_with_affine_ref, axis, eps)
else:
ref = partial(_layer_norm_ref, axis, eps)
if elementwise_affine:
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
inputs = [X, gamma, beta]
else:
inputs = [X]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=ref,
)
self.assertDeviceChecks(
device_options=dc,
op=op,
inputs=inputs,
outputs_to_check=[0, 1, 2],
)
@unittest.skipIf(workspace.has_hip_support,
"Operator cross-calling doesn't work with hip yet")
@given(X=hu.tensor(min_dim=2),
eps=st.floats(1e-5, 1e-3),
elementwise_affine=st.booleans(),
**hu.gcs)
def test_layer_norm_op_c10_preallocated_outputs(
self, X, eps, elementwise_affine, gc, dc):
# This test case ensures that it works correctly when output tensors are
# preallocated.
axis = np.random.randint(0, len(X.shape))
self.ws.create_blob("X").feed(X)
if elementwise_affine:
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
self.ws.create_blob("gamma").feed(gamma)
self.ws.create_blob("beta").feed(beta)
m = ModelHelper(name="test")
m.net.C10LayerNorm_DontUseThisOpYet(
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "std"],
axis=axis,
epsilon=eps,
elementwise_affine=elementwise_affine,
)
self.ws.create_net(m.param_init_net).run()
net = self.ws.create_net(m.net)
# run two times to be extra sure that the outputs are preallocated
net.run()
net.run()
if elementwise_affine:
expected_norm, expected_mean, expected_std = \
_layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
else:
expected_norm, expected_mean, expected_std = _layer_norm_ref(
axis, eps, X)
actual_norm = self.ws.fetch_blob('Y')
actual_mean = self.ws.fetch_blob('mean')
actual_std = self.ws.fetch_blob('std')
torch.testing.assert_allclose(
expected_norm, actual_norm, rtol=1e-4, atol=1e-4)
torch.testing.assert_allclose(expected_mean, actual_mean)
torch.testing.assert_allclose(expected_std, actual_std)
@given(X=hu.tensor(min_dim=2),
eps=st.floats(1e-5, 1e-3),
elementwise_affine=st.booleans(),
**hu.gcs)
def test_layer_norm_op_pytorch(self, X, eps, elementwise_affine, gc, dc):
axis = np.random.randint(0, len(X.shape))
if elementwise_affine:
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
expected_norm, expected_mean, expected_std = \
_layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
torch.tensor(X), torch.tensor(gamma), torch.tensor(beta),
axis, eps, True)
else:
expected_norm, expected_mean, expected_std = _layer_norm_ref(
axis, eps, X)
actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
torch.tensor(X), None, None, axis, eps)
torch.testing.assert_allclose(
expected_norm, actual_norm, rtol=1e-4, atol=1e-4)
torch.testing.assert_allclose(expected_mean, actual_mean)
torch.testing.assert_allclose(expected_std, actual_std)
# Test case is using workspace.has_cuda_support and not
# workspace.has_gpu_support to exclude it from HIP because tensor interop
# doesn't work for HIP tensors yet
@unittest.skipIf(not workspace.has_cuda_support, "No cuda support")
@given(X=hu.tensor(min_dim=2),
eps=st.floats(1e-5, 1e-3),
elementwise_affine=st.booleans())
def test_layer_norm_op_pytorch_cuda(self, X, eps, elementwise_affine):
axis = np.random.randint(0, len(X.shape))
if elementwise_affine:
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
expected_norm, expected_mean, expected_std = \
_layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
torch.tensor(X).cuda(),
torch.tensor(gamma).cuda(),
torch.tensor(beta).cuda(),
axis,
eps,
True)
else:
expected_norm, expected_mean, expected_std = _layer_norm_ref(
axis, eps, X)
actual_norm, actual_mean, actual_std = torch.ops._caffe2.LayerNorm(
torch.tensor(X).cuda(), None, None, axis, eps)
torch.testing.assert_allclose(
expected_norm, actual_norm.cpu(), rtol=1e-4, atol=1e-4)
torch.testing.assert_allclose(expected_mean, actual_mean.cpu())
torch.testing.assert_allclose(expected_std, actual_std.cpu())
@given(X=hu.tensor(min_dim=2),
eps=st.floats(1e-5, 1e-3),
elementwise_affine=st.booleans(),
**hu.gcs)
@settings(deadline=10000)
def test_layer_norm_op_jit(self, X, eps, elementwise_affine, gc, dc):
@torch.jit.script
def jit_layer_norm(
X: torch.Tensor,
gamma: Optional[torch.Tensor] = None,
beta: Optional[torch.Tensor] = None,
axis: int = 1,
eps: float = 1e-5,
elementwise_affine: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
return torch.ops._caffe2.LayerNorm(
X, gamma, beta, axis, eps, elementwise_affine)
axis = np.random.randint(0, len(X.shape))
if elementwise_affine:
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
expected_norm, expected_mean, expected_std = \
_layer_norm_with_affine_ref(axis, eps, X, gamma, beta)
actual_norm, actual_mean, actual_std = jit_layer_norm(
torch.tensor(X), torch.tensor(gamma), torch.tensor(beta),
axis, eps, elementwise_affine)
else:
expected_norm, expected_mean, expected_std = _layer_norm_ref(
axis, eps, X)
actual_norm, actual_mean, actual_std = jit_layer_norm(
torch.tensor(X), None, None, axis, eps, elementwise_affine)
torch.testing.assert_allclose(
expected_norm, actual_norm, rtol=1e-4, atol=1e-4)
torch.testing.assert_allclose(expected_mean, actual_mean)
torch.testing.assert_allclose(expected_std, actual_std)
@given(X=hu.tensor(min_dim=2), **hu.gcs)
def test_layer_norm_brew_wrapper(self, X, gc, dc):
axis = np.random.randint(0, len(X.shape))
scale_dim = [1] * np.ndim(X)
scale_dim[axis] = X.shape[axis]
self.ws.create_blob('input').feed(X)
model = ModelHelper(name='test_layer_norm_brew_wrapper')
brew.layer_norm(
model,
'input',
'output',
dim_in=X.shape[axis:],
axis=axis,
epsilon=1e-4,
)
self.ws.create_net(model.param_init_net).run()
self.ws.create_net(model.net).run()
@given(N=st.integers(1, 10), elementwise_affine=st.booleans(), **hu.gcs)
@settings(deadline=None)
def test_layer_norm_with_empty_batch(self, N, elementwise_affine, gc, dc):
X = np.random.randn(0, N).astype(np.float32)
gamma = np.random.rand(N).astype(np.float32)
beta = np.random.rand(N).astype(np.float32)
op = core.CreateOperator(
"LayerNorm",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "sigma"],
elementwise_affine=elementwise_affine,
)
def ref(X, gamma=None, beta=None):
Y = np.zeros_like(X)
axis = 1
mean = np.zeros(X.shape[:axis] + (1,), dtype=X.dtype)
sigma = np.zeros(X.shape[:axis] + (1,), dtype=X.dtype)
return Y, mean, sigma
inputs = [X, gamma, beta] if elementwise_affine else [X]
self.assertReferenceChecks(gc, op, inputs, ref)
self.assertDeviceChecks(dc, op, inputs, [0, 1])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/layer_norm_op_test.py
|
from caffe2.proto import caffe2_pb2
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
import unittest
class TestONNXWhile(serial.SerializedTestCase):
@given(
condition=st.booleans(),
max_trip_count=st.integers(0, 100),
save_scopes=st.booleans(),
disable_scopes=st.booleans(),
seed=st.integers(0, 65535),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_onnx_while_fibb(
self, condition, max_trip_count, save_scopes, disable_scopes, seed, gc, dc):
np.random.seed(seed)
if disable_scopes:
save_scopes = False
# Create body net
body_net = caffe2_pb2.NetDef()
# Two loop carried dependencies: first and second
body_net.external_input.extend(['i', 'cond', 'first', 'second'])
body_net.external_output.extend(['cond_new', 'second', 'third', 'third'])
add_op = core.CreateOperator(
'Add',
['first', 'second'],
['third'],
)
print3 = core.CreateOperator(
'Print',
['third'],
[],
)
limit_const = core.CreateOperator(
'ConstantFill',
[],
['limit_const'],
shape=[1],
dtype=caffe2_pb2.TensorProto.FLOAT,
value=100.0,
)
cond = core.CreateOperator(
'LT',
['third', 'limit_const'],
['cond_new'],
)
body_net.op.extend([add_op, print3, limit_const, cond])
while_op = core.CreateOperator(
'ONNXWhile',
['max_trip_count', 'condition', 'first_init', 'second_init'],
['first_a', 'second_a', 'third_a'],
body=body_net,
has_cond=True,
has_trip_count=True,
save_scopes=save_scopes,
disable_scopes=disable_scopes,
)
condition_arr = np.array(condition).astype(np.bool)
max_trip_count_arr = np.array(max_trip_count).astype(np.int64)
first_init = np.array([1]).astype(np.float32)
second_init = np.array([1]).astype(np.float32)
def ref(max_trip_count, condition, first_init, second_init):
first = 1
second = 1
results = []
if condition:
for _ in range(max_trip_count):
third = first + second
first = second
second = third
results.append(third)
if third > 100:
break
return (first, second, np.array(results).astype(np.float32))
self.assertReferenceChecks(
gc,
while_op,
[max_trip_count_arr, condition_arr, first_init, second_init],
ref,
)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/onnx_while_test.py
|
import numpy as np
import unittest
from hypothesis import given, settings
import hypothesis.strategies as st
from caffe2.python import core, utils
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
#
# Should match original Detectron code at
# https://github.com/facebookresearch/Detectron/blob/master/lib/ops/collect_and_distribute_fpn_rpn_proposals.py
#
def boxes_area(boxes):
"""Compute the area of an array of boxes."""
w = (boxes[:, 2] - boxes[:, 0] + 1)
h = (boxes[:, 3] - boxes[:, 1] + 1)
areas = w * h
    assert np.all(areas >= 0), 'Negative areas found'
return areas
def map_rois_to_fpn_levels(
rois,
k_min, k_max,
roi_canonical_scale, roi_canonical_level
):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
# Compute level ids
s = np.sqrt(boxes_area(rois))
# Eqn.(1) in FPN paper
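    # i.e. target_lvl = floor(k0 + log2(sqrt(area) / canonical_scale)); e.g. with
    # a canonical scale of 224 and canonical level 4 (illustrative values), an RoI
    # of area 224^2 maps to level 4 and one of area 112^2 maps to level 3.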
target_lvls = np.floor(
roi_canonical_level +
np.log2(s / roi_canonical_scale + 1e-6))
target_lvls = np.clip(target_lvls, k_min, k_max)
return target_lvls
def collect(inputs, **args):
post_nms_topN = args['rpn_post_nms_topN']
num_lvls = args['rpn_num_levels']
roi_inputs = inputs[:num_lvls]
score_inputs = inputs[num_lvls:]
    # rois are in [[batch_idx, x0, y0, x1, y1], ...] format
# Combine predictions across all levels and retain the top scoring
#
# equivalent to Detectron code
# rois = np.concatenate([blob.data for blob in roi_inputs])
# scores = np.concatenate([blob.data for blob in score_inputs]).squeeze()
rois = np.concatenate(roi_inputs)
scores = np.concatenate(score_inputs).squeeze()
assert rois.shape[0] == scores.shape[0]
inds = np.argsort(-scores, kind='mergesort')[:post_nms_topN]
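    # kind='mergesort' gives a stable sort, so equal scores keep their original
    # relative order and the top-N selection is deterministic.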
rois = rois[inds, :]
return rois
def distribute(rois, _, outputs, **args):
"""To understand the output blob order see return value of
roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)
"""
# equivalent to Detectron code
# lvl_min = cfg.FPN.ROI_MIN_LEVEL
# lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvl_min = args['roi_min_level']
lvl_max = lvl_min + args['roi_num_levels'] - 1
lvls = map_rois_to_fpn_levels(
rois[:, 1:5],
lvl_min, lvl_max,
args['roi_canonical_scale'],
args['roi_canonical_level'])
# equivalent to Detectron code
# outputs[0].reshape(rois.shape)
# outputs[0].data[...] = rois
outputs[0] = rois
# Create new roi blobs for each FPN level
# (See: modeling.FPN.add_multilevel_roi_blobs which is similar but annoying
# to generalize to support this particular case.)
rois_idx_order = np.empty((0, ))
for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):
idx_lvl = np.where(lvls == lvl)[0]
blob_roi_level = rois[idx_lvl, :]
# equivalent to Detectron code
# outputs[output_idx + 1].reshape(blob_roi_level.shape)
# outputs[output_idx + 1].data[...] = blob_roi_level
outputs[output_idx + 1] = blob_roi_level
rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
rois_idx_restore = np.argsort(rois_idx_order, kind='mergesort')
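    # rois_idx_restore maps each RoI in the original collected order to its row
    # in the level-wise concatenation, letting downstream ops undo the regrouping.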
# equivalent to Detectron code
# py_op_copy_blob(
# rois_idx_restore.astype(np.int32), outputs[-1])
outputs[-1] = rois_idx_restore.astype(np.int32)
def collect_and_distribute_fpn_rpn_ref(*inputs):
assert inputs
args = inputs[-1]
inputs = inputs[:-1]
num_rpn_lvls = args['rpn_num_levels']
assert len(inputs) == 2 * num_rpn_lvls
N = inputs[0].shape[0]
for i in range(num_rpn_lvls):
assert len(inputs[i].shape) == 2
assert inputs[i].shape[0] == N
assert inputs[i].shape[1] == 5
for i in range(num_rpn_lvls, 2 * num_rpn_lvls):
assert len(inputs[i].shape) == 1
assert inputs[i].shape[0] == N
num_roi_lvls = args['roi_num_levels']
outputs = (num_roi_lvls + 2) * [None]
rois = collect(inputs, **args)
distribute(rois, None, outputs, **args)
return outputs
def collect_rpn_ref(*inputs):
args = inputs[-1]
inputs = inputs[:-1]
rois = collect(inputs, **args)
return [rois]
def distribute_fpn_ref(*inputs):
args = inputs[-1]
inputs = inputs[:-1]
rois = inputs[0]
num_roi_lvls = args['roi_num_levels']
outputs = (num_roi_lvls + 2) * [None]
distribute(rois, None, outputs, **args)
# remove the first rois from output of distribute
outputs.pop(0)
return outputs
class TestCollectAndDistributeFpnRpnProposals(serial.SerializedTestCase):
@staticmethod
def _create_input(proposal_count, rpn_min_level, rpn_num_levels, roi_canonical_scale):
np.random.seed(0)
input_names = []
inputs = []
for lvl in range(rpn_num_levels):
rpn_roi = (
roi_canonical_scale *
np.random.rand(proposal_count, 5).astype(np.float32)
)
for i in range(proposal_count):
                # Make RoIs have positive area, since they
                # are in the format [[batch_idx, x0, y0, x1, y1], ...]
rpn_roi[i][3] += rpn_roi[i][1]
rpn_roi[i][4] += rpn_roi[i][2]
input_names.append('rpn_rois_fpn{}'.format(lvl + rpn_min_level))
inputs.append(rpn_roi)
for lvl in range(rpn_num_levels):
rpn_roi_score = np.random.rand(proposal_count).astype(np.float32)
input_names.append('rpn_roi_probs_fpn{}'.format(lvl + rpn_min_level))
inputs.append(rpn_roi_score)
return input_names, inputs
@given(proposal_count=st.integers(min_value=1000, max_value=8000),
rpn_min_level=st.integers(min_value=1, max_value=4),
rpn_num_levels=st.integers(min_value=1, max_value=6),
roi_min_level=st.integers(min_value=1, max_value=4),
roi_num_levels=st.integers(min_value=1, max_value=6),
rpn_post_nms_topN=st.integers(min_value=1000, max_value=4000),
roi_canonical_scale=st.integers(min_value=100, max_value=300),
roi_canonical_level=st.integers(min_value=1, max_value=8),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_collect_and_dist(
self,
proposal_count,
rpn_min_level, rpn_num_levels,
roi_min_level, roi_num_levels,
rpn_post_nms_topN,
roi_canonical_scale, roi_canonical_level,
gc, dc
):
input_names, inputs = self._create_input(
proposal_count, rpn_min_level, rpn_num_levels, roi_canonical_scale
)
output_names = [
'rois',
]
for lvl in range(roi_num_levels):
output_names.append('rois_fpn{}'.format(lvl + roi_min_level))
output_names.append('rois_idx_restore')
op = core.CreateOperator(
'CollectAndDistributeFpnRpnProposals',
input_names,
output_names,
arg=[
utils.MakeArgument("roi_canonical_scale", roi_canonical_scale),
utils.MakeArgument("roi_canonical_level", roi_canonical_level),
utils.MakeArgument("roi_max_level", roi_min_level + roi_num_levels - 1),
utils.MakeArgument("roi_min_level", roi_min_level),
utils.MakeArgument("rpn_max_level", rpn_min_level + rpn_num_levels - 1),
utils.MakeArgument("rpn_min_level", rpn_min_level),
utils.MakeArgument("rpn_post_nms_topN", rpn_post_nms_topN),
],
device_option=gc)
args = {
'rpn_min_level' : rpn_min_level,
'rpn_num_levels' : rpn_num_levels,
'roi_min_level' : roi_min_level,
'roi_num_levels' : roi_num_levels,
'rpn_post_nms_topN' : rpn_post_nms_topN,
'roi_canonical_scale' : roi_canonical_scale,
'roi_canonical_level' : roi_canonical_level}
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs + [args],
reference=collect_and_distribute_fpn_rpn_ref,
)
@given(
proposal_count=st.integers(min_value=1000, max_value=8000),
rpn_min_level=st.integers(min_value=1, max_value=4),
rpn_num_levels=st.integers(min_value=1, max_value=6),
roi_min_level=st.integers(min_value=1, max_value=4),
roi_num_levels=st.integers(min_value=1, max_value=6),
rpn_post_nms_topN=st.integers(min_value=1000, max_value=4000),
roi_canonical_scale=st.integers(min_value=100, max_value=300),
roi_canonical_level=st.integers(min_value=1, max_value=8),
**hu.gcs_cpu_only)
@settings(deadline=10000)
def test_collect_and_dist_separately(
self,
proposal_count,
rpn_min_level, rpn_num_levels,
roi_min_level, roi_num_levels,
rpn_post_nms_topN,
roi_canonical_scale, roi_canonical_level,
gc, dc
):
input_names, inputs = self._create_input(
proposal_count, rpn_min_level, rpn_num_levels, roi_canonical_scale
)
collect_op = core.CreateOperator(
'CollectRpnProposals',
input_names,
['rois'],
arg=[
utils.MakeArgument("rpn_max_level", rpn_min_level + rpn_num_levels - 1),
utils.MakeArgument("rpn_min_level", rpn_min_level),
utils.MakeArgument("rpn_post_nms_topN", rpn_post_nms_topN),
],
device_option=gc)
collect_args = {
'rpn_min_level' : rpn_min_level,
'rpn_num_levels' : rpn_num_levels,
'rpn_post_nms_topN' : rpn_post_nms_topN,
}
self.assertReferenceChecks(
device_option=gc,
op=collect_op,
inputs=inputs + [collect_args],
reference=collect_rpn_ref,
)
rois = collect(inputs, **collect_args)
output_names = []
for lvl in range(roi_num_levels):
output_names.append('rois_fpn{}'.format(lvl + roi_min_level))
output_names.append('rois_idx_restore')
distribute_op = core.CreateOperator(
'DistributeFpnProposals',
['rois'],
output_names,
arg=[
utils.MakeArgument("roi_canonical_scale", roi_canonical_scale),
utils.MakeArgument("roi_canonical_level", roi_canonical_level),
utils.MakeArgument("roi_max_level", roi_min_level + roi_num_levels - 1),
utils.MakeArgument("roi_min_level", roi_min_level),
],
device_option=gc)
distribute_args = {
'roi_min_level' : roi_min_level,
'roi_num_levels' : roi_num_levels,
'roi_canonical_scale' : roi_canonical_scale,
'roi_canonical_level' : roi_canonical_level}
self.assertReferenceChecks(
device_option=gc,
op=distribute_op,
inputs=[rois, distribute_args],
reference=distribute_fpn_ref,
)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/collect_and_distribute_fpn_rpn_proposals_op_test.py
|
import time
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
from hypothesis import given, settings
np.set_printoptions(precision=6)
class TestSpeedFloatToFusedRandRowwiseQuantized(hu.HypothesisTestCase):
@given(
bitwidth_=st.sampled_from([1, 2, 4, 8]),
random_=st.sampled_from([True, False]),
data_shape_=st.sampled_from(
[
np.array([32, 512]),
np.array([1, 1024]),
np.array([1024, 1024]),
np.array([1024, 1224]),
np.array([512, 969]),
]
),
**hu.gcs
)
@settings(deadline=10000)
def test_speed_of_rand_quantization(self, bitwidth_, random_, data_shape_, gc, dc):
X1 = np.random.rand(data_shape_[0], data_shape_[1]).astype(np.float32)
X2 = np.random.rand(data_shape_[0], data_shape_[1]).astype(np.float32)
sub_scale_sum_net = core.Net("sub_scale_sum")
sub_op = core.CreateOperator("Sub", ["X1", "X2"], ["dX"])
scale_op = core.CreateOperator("Scale", ["dX"], ["dX"], scale=0.023)
sum_op = core.CreateOperator("Sum", ["X2", "dX"], ["X2"])
sub_scale_sum_net.Proto().op.extend([sub_op, scale_op, sum_op])
enc_net = core.Net("enc")
enc_op = core.CreateOperator(
"FloatToFusedRandRowwiseQuantized",
["dX"],
["Y"],
bitwidth=bitwidth_,
random=random_,
)
enc_net.Proto().op.extend([enc_op])
dec_net = core.Net("dec")
dec_op = core.CreateOperator(
"FusedRandRowwiseQuantizedToFloat", ["Y"], ["decX"]
)
dec_net.Proto().op.extend([dec_op])
workspace.FeedBlob("X1", X1)
workspace.FeedBlob("X2", X2)
workspace.CreateNet(sub_scale_sum_net)
workspace.CreateNet(enc_net)
workspace.CreateNet(dec_net)
workspace.RunNet(sub_scale_sum_net)
workspace.RunNet(enc_net)
workspace.RunNet(dec_net)
sub_scale_sum_time = 0
enc_time = 0
dec_time = 0
times = 10
for _ in range(times):
start = time.time()
workspace.RunNet(sub_scale_sum_net)
end = time.time()
sub_scale_sum_time += end - start
start = time.time()
workspace.RunNet(enc_net)
end = time.time()
enc_time += end - start
start = time.time()
workspace.RunNet(dec_net)
end = time.time()
dec_time += end - start
print("Sub+Scale+Sum time: {} ms".format(sub_scale_sum_time / times * 1000))
print(
"Quantizing time: {} ms ({}X)".format(
enc_time / times * 1000, enc_time / sub_scale_sum_time
)
)
print(
"De-quantizing time: {} ms ({}X)".format(
dec_time / times * 1000, dec_time / sub_scale_sum_time
)
)
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/rand_quantization_op_speed_test.py
|
#!/usr/bin/env python3
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
from hypothesis import given
class TestAsyncNetBarrierOp(hu.HypothesisTestCase):
@given(
n=st.integers(1, 5),
shape=st.lists(st.integers(0, 5), min_size=1, max_size=3),
**hu.gcs
)
def test_async_net_barrier_op(self, n, shape, dc, gc):
test_inputs = [(100 * np.random.random(shape)).astype(np.float32) for _ in range(n)]
test_input_blobs = ["x_{}".format(i) for i in range(n)]
barrier_op = core.CreateOperator(
"AsyncNetBarrier",
test_input_blobs,
test_input_blobs,
device_option=gc,
)
def reference_func(*args):
            self.assertEqual(len(args), n)
return args
self.assertReferenceChecks(gc, barrier_op, test_inputs, reference_func)
|
pytorch-master
|
caffe2/python/operator_test/async_net_barrier_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
class TestIntegralImageOps(serial.SerializedTestCase):
@given(batch_size=st.integers(1, 3),
height=st.integers(7, 10),
width=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
@settings(deadline=10000)
def test_integral_image_ops(self, batch_size, height, width, channels, gc, dc):
N = batch_size
C = channels
H = height
W = width
im = np.random.rand(N, C, H, W).astype(np.float32)
op = core.CreateOperator("IntegralImage",
["im"], ["y"])
def integral_image(im):
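            # y is zero-padded on the first row and column, and
            # y[n, c, i, j] holds the sum of im[n, c, :i, :j].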
y = np.random.rand(N, C, H + 1, W + 1).astype(np.float32)
for i1 in range(N):
for i2 in range(C):
for i3 in range(W + 1):
y[i1, i2, 0, i3] = 0
for i3 in range(H + 1):
y[i1, i2, i3, 0] = 0
for i3 in range(1, H + 1):
for i4 in range(1, W + 1):
y[i1, i2, i3, i4] = im[i1, i2, i3 - 1, i4 - 1] + \
y[i1, i2, i3 - 1, i4] + \
y[i1, i2, i3, i4 - 1] - \
y[i1, i2, i3 - 1, i4 - 1]
return [y]
self.assertDeviceChecks(dc, op, [im], [0])
self.assertReferenceChecks(gc, op, [im], integral_image)
@given(batch_size=st.integers(1, 3),
height=st.integers(7, 10),
width=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
@settings(deadline=10000)
def test_integral_image_gradient_ops(self, batch_size, height, width,
channels, gc, dc):
N = batch_size
C = channels
H = height
W = width
X = np.random.rand(N, C, H, W).astype(np.float32)
dY = np.random.rand(N, C, H + 1, W + 1).astype(np.float32)
op = core.CreateOperator(
"IntegralImageGradient",
["X", "dY"],
["dX"])
def integral_image_gradient(X, dY):
dX = np.random.rand(N, C, H, W).astype(np.float32)
dX1 = np.random.rand(N, C, H + 1, W).astype(np.float32)
            # (H+1, W+1) => (H+1, W)
for i1 in range(N):
for i2 in range(C):
for i3 in range(H + 1):
dX1[i1, i2, i3, 0] = dY[i1, i2, i3, 0]
for i4 in range(1, W):
dX1[i1, i2, i3, i4] = dY[i1, i2, i3, i4] + \
dX1[i1, i2, i3, i4 - 1]
            # (H+1, W) => (H, W)
for i1 in range(N):
for i2 in range(C):
for i3 in range(W):
dX[i1, i2, 0, i3] = dX1[i1, i2, 0, i3]
for i4 in range(1, H):
dX[i1, i2, i4, i3] = dX1[i1, i2, i4, i3] + \
dX[i1, i2, i4 - 1, i3]
return [dX]
self.assertDeviceChecks(dc, op, [X, dY], [0])
self.assertReferenceChecks(gc, op, [X, dY], integral_image_gradient)
|
pytorch-master
|
caffe2/python/operator_test/integral_image_ops_test.py
|
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
class TestKeySplitOps(hu.HypothesisTestCase):
@given(
X=hu.arrays(
dims=[1000],
dtype=np.int64,
elements=st.integers(min_value=0, max_value=100)
),
**hu.gcs_cpu_only
)
def test_key_split_op(self, X, gc, dc):
categorical_limit = max(X) + 1
workspace.ResetWorkspace()
workspace.FeedBlob('X', X)
output_blobs = ['Y_%d' % i for i in range(categorical_limit)]
op = core.CreateOperator(
'KeySplit', ['X'],
output_blobs,
categorical_limit=categorical_limit
)
workspace.RunOperatorOnce(op)
output_vecs = [
workspace.blobs[output_blobs[i]] for i in range(categorical_limit)
]
expected_output_vecs = [[] for _ in range(categorical_limit)]
for i, x in enumerate(X):
expected_output_vecs[x].append(i)
for i in range(categorical_limit):
np.testing.assert_array_equal(
output_vecs[i],
np.array(expected_output_vecs[i], dtype=np.int32)
)
|
pytorch-master
|
caffe2/python/operator_test/key_split_ops_test.py
|
import functools
import numpy as np
from hypothesis import given, settings
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import copy
class TestNormalizeOp(hu.HypothesisTestCase):
@given(
X=hu.tensor(
min_dim=1, max_dim=5, elements=hu.floats(min_value=0.5, max_value=1.0)
),
**hu.gcs
)
@settings(max_examples=10, deadline=None)
def test_normalize(self, X, gc, dc):
def ref_normalize(X, axis):
x_normed = X / np.maximum(
np.sqrt((X ** 2).sum(axis=axis, keepdims=True)), 1e-12
)
return (x_normed,)
for axis in range(-X.ndim, X.ndim):
x = copy.copy(X)
op = core.CreateOperator("Normalize", "X", "Y", axis=axis)
self.assertReferenceChecks(
gc, op, [x], functools.partial(ref_normalize, axis=axis)
)
self.assertDeviceChecks(dc, op, [x], [0])
self.assertGradientChecks(gc, op, [x], 0, [0])
@given(
X=hu.tensor(
min_dim=1, max_dim=5, elements=hu.floats(min_value=0.5, max_value=1.0)
),
**hu.gcs
)
@settings(max_examples=10, deadline=None)
def test_normalize_L1(self, X, gc, dc):
def ref(X, axis):
norm = abs(X).sum(axis=axis, keepdims=True)
return (X / norm,)
for axis in range(-X.ndim, X.ndim):
print("axis: ", axis)
op = core.CreateOperator("NormalizeL1", "X", "Y", axis=axis)
self.assertReferenceChecks(gc, op, [X], functools.partial(ref, axis=axis))
self.assertDeviceChecks(dc, op, [X], [0])
|
pytorch-master
|
caffe2/python/operator_test/normalize_op_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.serialized_test.serialized_test_util as serial
from hypothesis import given, settings
import hypothesis.strategies as st
import numpy as np
class TestMarginRankingCriterion(serial.SerializedTestCase):
@given(N=st.integers(min_value=10, max_value=20),
seed=st.integers(min_value=0, max_value=65535),
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
@settings(deadline=10000)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
"MarginRankingCriterion", ["X1", "X2", "Y"], ["loss"],
margin=margin)
def ref_cec(X1, X2, Y):
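            # Margin ranking loss: max(0, -Y * (X1 - X2) + margin), where
            # Y in {-1, 1} indicates which input should be ranked higher.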
result = np.maximum(-Y * (X1 - X2) + margin, 0)
return (result, )
inputs = [X1, X2, Y]
# This checks the op implementation against a reference function in
# python.
self.assertReferenceChecks(gc, op, inputs, ref_cec)
# This checks the op implementation over multiple device options (e.g.
# CPU and CUDA). [0] means that the 0-th output is checked.
self.assertDeviceChecks(dc, op, inputs, [0])
        # Move inputs away from the hinge point so the gradient check is well-behaved
X1[np.abs(margin - Y * (X1 - X2)) < 0.1] += 0.1
X2[np.abs(margin - Y * (X1 - X2)) < 0.1] -= 0.1
# Check dX1
self.assertGradientChecks(gc, op, inputs, 0, [0])
# Check dX2
self.assertGradientChecks(gc, op, inputs, 1, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/margin_ranking_criterion_op_test.py
|
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
class TestFeatureMapsOps(TestCase):
def test_merge_dense_feature_tensors(self):
op = core.CreateOperator(
"MergeDenseFeatureTensors",
[
"in1", "in1_presence",
],
[
"out_lengths", "out_keys", "out_values",
],
feature_ids=[11, 12, 13, 14]
)
# Input 1.
workspace.FeedBlob(
"in1",
np.array([[11.1, 12.1, 13.1, 14.1], [11.2, 12.2, 13.2, 14.2]], dtype=np.float)
)
workspace.FeedBlob(
"in1_presence",
np.array([[True, False, False, True], [False, True, True, False]], dtype=np.bool)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("out_lengths"),
np.array([2, 2], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_keys"),
np.array([11, 14, 12, 13], dtype=np.int64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values"),
np.array([11.1, 14.1, 12.2, 13.2], dtype=np.float)
)
def test_merge_single_scalar_feature_tensors(self):
op = core.CreateOperator(
"MergeSingleScalarFeatureTensors",
[
"in1", "in1_presence",
"in2", "in2_presence",
],
[
"out_lengths", "out_keys", "out_values",
],
feature_ids=[11, 12]
)
# Input 1.
workspace.FeedBlob(
"in1",
np.array([11.1, 0.0], dtype=np.float)
)
workspace.FeedBlob(
"in1_presence",
np.array([True, False], dtype=np.bool)
)
# Input 2.
workspace.FeedBlob(
"in2",
np.array([12.1, 12.2], dtype=np.float)
)
workspace.FeedBlob(
"in2_presence",
np.array([True, True], dtype=np.bool)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("out_lengths"),
np.array([2, 1], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_keys"),
np.array([11, 12, 12], dtype=np.int64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values"),
np.array([11.1, 12.1, 12.2], dtype=np.float)
)
def test_merge_single_scalar_feature_tensors_gradient(self):
op = core.CreateOperator(
"MergeSingleScalarFeatureTensorsGradient",
[
"in1_presence",
"in2_presence",
"in3_presence",
"out_values_grad",
],
[
"in1_grad", "in2_grad", "in3_grad",
],
)
# Inputs 1, 2 & 3.
workspace.FeedBlob(
"in1_presence",
np.array([True, False], dtype=np.bool)
)
workspace.FeedBlob(
"in2_presence",
np.array([True, True], dtype=np.bool)
)
workspace.FeedBlob(
"in3_presence",
np.array([False, True], dtype=np.bool)
)
# Input 4.
workspace.FeedBlob(
"out_values_grad",
np.array([0.1, 1.1, 1.2, 2.3], dtype=np.float)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("in1_grad"),
np.array([0.1, 0], dtype=np.float)
)
np.testing.assert_array_equal(
workspace.FetchBlob("in2_grad"),
np.array([1.1, 1.2], dtype=np.float)
)
np.testing.assert_array_equal(
workspace.FetchBlob("in3_grad"),
np.array([0, 2.3], dtype=np.float)
)
def test_merge_single_scalar_feature_tensors_gradient_with_strings(self):
op = core.CreateOperator(
"MergeSingleScalarFeatureTensorsGradient",
[
"in1_presence",
"in2_presence",
"in3_presence",
"out_values_grad",
],
[
"in1_grad", "in2_grad", "in3_grad",
],
)
# Inputs 1, 2 & 3.
workspace.FeedBlob(
"in1_presence",
np.array([True, False], dtype=np.bool)
)
workspace.FeedBlob(
"in2_presence",
np.array([True, True], dtype=np.bool)
)
workspace.FeedBlob(
"in3_presence",
np.array([False, True], dtype=np.bool)
)
# Input 4.
workspace.FeedBlob(
"out_values_grad",
np.array(["0.1", "1.1", "1.2", "2.3"], dtype=np.unicode_)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("in1_grad"),
np.array(["0.1", ""], dtype=np.bytes_)
)
np.testing.assert_array_equal(
workspace.FetchBlob("in2_grad"),
np.array(["1.1", "1.2"], dtype=np.bytes_)
)
np.testing.assert_array_equal(
workspace.FetchBlob("in3_grad"),
np.array(["", "2.3"], dtype=np.bytes_)
)
def test_merge_single_list_feature_tensors(self):
op = core.CreateOperator(
"MergeSingleListFeatureTensors",
[
"in1_lengths", "in1_values", "in1_presence",
"in2_lengths", "in2_values", "in2_presence",
],
[
"out_lengths", "out_keys", "out_values_lengths",
"out_values_values",
],
feature_ids=[11, 12]
)
# Input 1.
workspace.FeedBlob(
"in1_lengths",
np.array([2, 0], dtype=np.int32)
)
workspace.FeedBlob(
"in1_values",
np.array([11.1, 11.2], dtype=np.float)
)
workspace.FeedBlob(
"in1_presence",
np.array([True, False], dtype=np.bool)
)
# Input 2.
workspace.FeedBlob(
"in2_lengths",
np.array([2, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in2_values",
np.array([12.1, 12.2, 12.3, 12.4], dtype=np.float)
)
workspace.FeedBlob(
"in2_presence",
np.array([True, True], dtype=np.bool)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("out_lengths"),
np.array([2, 1], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_keys"),
np.array([11, 12, 12], dtype=np.int64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_lengths"),
np.array([2, 2, 2], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_values"),
np.array([11.1, 11.2, 12.1, 12.2, 12.3, 12.4], dtype=np.float)
)
def test_merge_single_list_feature_tensors_gradient(self):
self._test_merge_single_list_or_map_feature_tensors_gradient(
"MergeSingleListFeatureTensorsGradient"
)
def test_merge_single_map_feature_tensors_gradient(self):
self._test_merge_single_list_or_map_feature_tensors_gradient(
"MergeSingleMapFeatureTensorsGradient"
)
def _test_merge_single_list_or_map_feature_tensors_gradient(self, op_name):
op = core.CreateOperator(
op_name,
[
"in1_lengths", "in1_presence",
"in2_lengths", "in2_presence",
"out_values_values_grad",
],
[
"in1_values_grad",
"in2_values_grad",
],
)
# Input 1.
workspace.FeedBlob(
"in1_lengths",
np.array([2, 0], dtype=np.int32)
)
workspace.FeedBlob(
"in1_presence",
np.array([True, False], dtype=np.bool)
)
# Input 2.
workspace.FeedBlob(
"in2_lengths",
np.array([2, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in2_presence",
np.array([True, True], dtype=np.bool)
)
workspace.FeedBlob(
"out_values_values_grad",
np.array([11.1, 11.2, 12.1, 12.2, 12.3, 12.4], dtype=np.float)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("in1_values_grad"),
np.array([11.1, 11.2], dtype=np.float)
)
np.testing.assert_array_equal(
workspace.FetchBlob("in2_values_grad"),
np.array([12.1, 12.2, 12.3, 12.4], dtype=np.float)
)
def test_merge_single_map_feature_tensors(self):
op = core.CreateOperator(
"MergeSingleMapFeatureTensors",
[
"in1_lengths", "in1_keys", "in1_values", "in1_presence",
"in2_lengths", "in2_keys", "in2_values", "in2_presence",
],
[
"out_lengths", "out_keys", "out_values_lengths",
"out_values_keys", "out_values_values",
],
feature_ids=[11, 12]
)
# Input 1.
workspace.FeedBlob(
"in1_lengths",
np.array([2, 0], dtype=np.int32)
)
workspace.FeedBlob(
"in1_keys",
np.array([111, 112], dtype=np.int64)
)
workspace.FeedBlob(
"in1_values",
np.array([11.1, 11.2], dtype=np.float)
)
workspace.FeedBlob(
"in1_presence",
np.array([True, False], dtype=np.bool)
)
# Input 2.
workspace.FeedBlob(
"in2_lengths",
np.array([2, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in2_keys",
np.array([121, 122, 123, 124], dtype=np.int64)
)
workspace.FeedBlob(
"in2_values",
np.array([12.1, 12.2, 12.3, 12.4], dtype=np.float)
)
workspace.FeedBlob(
"in2_presence",
np.array([True, True], dtype=np.bool)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("out_lengths"),
np.array([2, 1], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_keys"),
np.array([11, 12, 12], dtype=np.int64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_lengths"),
np.array([2, 2, 2], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_keys"),
np.array([111, 112, 121, 122, 123, 124], dtype=np.int64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_values"),
np.array([11.1, 11.2, 12.1, 12.2, 12.3, 12.4], dtype=np.float)
)
def test_merge_multi_scalar_feature_tensors(self):
op = core.CreateOperator(
"MergeMultiScalarFeatureTensors",
[
"in1_lengths", "in1_keys", "in1_values",
"in2_lengths", "in2_keys", "in2_values",
],
[
"out_lengths", "out_keys", "out_values",
]
)
# Input 1.
workspace.FeedBlob(
"in1_lengths",
np.array([1, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in1_keys",
np.array([11, 12, 13], dtype=np.int64)
)
workspace.FeedBlob(
"in1_values",
np.array([11.0, 12.0, 13.0], dtype=np.float)
)
# Input 2.
workspace.FeedBlob(
"in2_lengths",
np.array([2, 1], dtype=np.int32)
)
workspace.FeedBlob(
"in2_keys",
np.array([14, 15, 16], dtype=np.int64)
)
workspace.FeedBlob(
"in2_values",
np.array([14.0, 15.0, 16.0], dtype=np.float)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("out_lengths"),
np.array([3, 3], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_keys"),
np.array([11, 14, 15, 12, 13, 16], dtype=np.int64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values"),
np.array([11.0, 14.0, 15.0, 12.0, 13.0, 16.0], dtype=np.float)
)
def test_merge_multi_scalar_feature_tensors_gradient(self):
op = core.CreateOperator(
"MergeMultiScalarFeatureTensorsGradient",
[
"in1_lengths",
"in2_lengths",
"out_values_grad"
],
[
"in1_values_grad",
"in2_values_grad",
]
)
# Input 1.
workspace.FeedBlob(
"in1_lengths",
np.array([1, 2, 0], dtype=np.int32)
)
# Input 2.
workspace.FeedBlob(
"in2_lengths",
np.array([2, 1, 1], dtype=np.int32)
)
# Grad input.
workspace.FeedBlob(
"out_values_grad",
np.array([11.0, 14.0, 15.0, 12.0, 13.0, 16.0, 17.0], dtype=np.float)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("in1_values_grad"),
np.array([11.0, 12.0, 13.0], dtype=np.float)
)
np.testing.assert_array_equal(
workspace.FetchBlob("in2_values_grad"),
np.array([14.0, 15.0, 16.0, 17.0], dtype=np.float)
)
def test_merge_multi_list_feature_tensors(self):
op = core.CreateOperator(
"MergeMultiListFeatureTensors",
[
"in1_lengths", "in1_keys", "in1_values_lengths",
"in1_values_values",
"in2_lengths", "in2_keys", "in2_values_lengths",
"in2_values_values",
],
[
"out_lengths", "out_keys", "out_values_lengths",
"out_values_values"
]
)
# Input 1.
workspace.FeedBlob(
"in1_lengths",
np.array([1, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in1_keys",
np.array([11, 12, 13], dtype=np.int64)
)
workspace.FeedBlob(
"in1_values_lengths",
np.array([2, 2, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in1_values_values",
            np.array([11.1, 11.2, 12.1, 12.2, 13.1, 13.2], dtype=np.float64)
)
# Input 2.
workspace.FeedBlob(
"in2_lengths",
np.array([2, 1], dtype=np.int32)
)
workspace.FeedBlob(
"in2_keys",
np.array([14, 15, 16], dtype=np.int64)
)
workspace.FeedBlob(
"in2_values_lengths",
np.array([2, 2, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in2_values_values",
            np.array([14.1, 14.2, 15.1, 15.2, 16.1, 16.2], dtype=np.float64)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("out_lengths"),
np.array([3, 3], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_keys"),
np.array([11, 14, 15, 12, 13, 16], dtype=np.int64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_lengths"),
np.array([2, 2, 2, 2, 2, 2], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_values"),
np.array(
[
11.1, 11.2, 14.1, 14.2, 15.1, 15.2, 12.1, 12.2, 13.1, 13.2,
16.1, 16.2
],
                dtype=np.float64
)
)
def test_merge_multi_map_feature_tensors(self):
op = core.CreateOperator(
"MergeMultiMapFeatureTensors",
[
"in1_lengths", "in1_keys", "in1_values_lengths",
"in1_values_keys", "in1_values_values",
"in2_lengths", "in2_keys", "in2_values_lengths",
"in2_values_keys", "in2_values_values",
],
[
"out_lengths", "out_keys", "out_values_lengths",
"out_values_keys", "out_values_values"
]
)
# Input 1.
workspace.FeedBlob(
"in1_lengths",
np.array([1, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in1_keys",
np.array([11, 12, 13], dtype=np.int64)
)
workspace.FeedBlob(
"in1_values_lengths",
np.array([2, 2, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in1_values_keys",
np.array([111, 112, 121, 122, 131, 132], dtype=np.int64)
)
workspace.FeedBlob(
"in1_values_values",
            np.array([11.1, 11.2, 12.1, 12.2, 13.1, 13.2], dtype=np.float64)
)
# Input 2.
workspace.FeedBlob(
"in2_lengths",
np.array([2, 1], dtype=np.int32)
)
workspace.FeedBlob(
"in2_keys",
np.array([14, 15, 16], dtype=np.int64)
)
workspace.FeedBlob(
"in2_values_lengths",
np.array([2, 2, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in2_values_keys",
np.array([141, 142, 151, 152, 161, 162], dtype=np.int64)
)
workspace.FeedBlob(
"in2_values_values",
            np.array([14.1, 14.2, 15.1, 15.2, 16.1, 16.2], dtype=np.float64)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("out_lengths"),
np.array([3, 3], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_keys"),
np.array([11, 14, 15, 12, 13, 16], dtype=np.int64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_lengths"),
np.array([2, 2, 2, 2, 2, 2], dtype=np.int32)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_keys"),
np.array(
[111, 112, 141, 142, 151, 152, 121, 122, 131, 132, 161, 162],
dtype=np.int64
)
)
np.testing.assert_array_equal(
workspace.FetchBlob("out_values_values"),
np.array(
[
11.1, 11.2, 14.1, 14.2, 15.1, 15.2, 12.1, 12.2, 13.1, 13.2,
16.1, 16.2
],
                dtype=np.float64
)
)
def test_merge_multi_list_feature_tensors_gradient(self):
self._test_merge_multi_list_or_map_feature_tensors_gradient(
"MergeMultiListFeatureTensorsGradient"
)
def test_merge_multi_map_feature_tensors_gradient(self):
self._test_merge_multi_list_or_map_feature_tensors_gradient(
"MergeMultiMapFeatureTensorsGradient"
)
def _test_merge_multi_list_or_map_feature_tensors_gradient(self, op_name):
op = core.CreateOperator(
op_name,
[
"in1_lengths", "in1_values_lengths",
"in2_lengths", "in2_values_lengths",
"out_values_values_grad"
],
[
"in1_values_values_grad",
"in2_values_values_grad",
]
)
# Input 1.
workspace.FeedBlob(
"in1_lengths",
np.array([1, 2], dtype=np.int32)
)
workspace.FeedBlob(
"in1_values_lengths",
np.array([2, 2, 2], dtype=np.int32)
)
# Input 2.
workspace.FeedBlob(
"in2_lengths",
np.array([2, 1], dtype=np.int32)
)
workspace.FeedBlob(
"in2_values_lengths",
np.array([2, 2, 2], dtype=np.int32)
)
# Grad Input.
workspace.FeedBlob(
"out_values_values_grad",
np.array(
[
11.1, 11.2, 14.1, 14.2, 15.1, 15.2, 12.1, 12.2, 13.1, 13.2,
16.1, 16.2
],
                dtype=np.float64
)
)
workspace.RunOperatorOnce(op)
np.testing.assert_array_equal(
workspace.FetchBlob("in1_values_values_grad"),
            np.array([11.1, 11.2, 12.1, 12.2, 13.1, 13.2], dtype=np.float64)
)
np.testing.assert_array_equal(
workspace.FetchBlob("in2_values_values_grad"),
            np.array([14.1, 14.2, 15.1, 15.2, 16.1, 16.2], dtype=np.float64)
)
|
pytorch-master
|
caffe2/python/operator_test/feature_maps_ops_test.py
|
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import caffe2.python.serialized_test.serialized_test_util as serial
import hypothesis.strategies as st
import numpy as np
import unittest
class TestCeil(serial.SerializedTestCase):
@given(X=hu.tensor(),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
@settings(deadline=10000)
def test_ceil(self, X, gc, dc, engine):
op = core.CreateOperator("Ceil", ["X"], ["Y"], engine=engine)
def ceil_ref(X):
return (np.ceil(X),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=ceil_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/operator_test/ceil_op_test.py
|
import numpy as np
import caffe2.python.models.shufflenet as shufflenet
import hypothesis.strategies as st
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.models.imagenet_trainer_test_utils as utils
class ShufflenetMemongerTest(hu.HypothesisTestCase):
@given(with_shapes=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=2, deadline=None)
def test_shufflenet_shared_grads(self, with_shapes, gc, dc):
results = utils.test_shared_grads(
with_shapes,
shufflenet.create_shufflenet,
'gpu_0/stage1_conv_w',
'gpu_0/last_out_L1000'
)
self.assertTrue(results[0][0] < results[0][1])
np.testing.assert_almost_equal(results[1][0], results[1][1])
np.testing.assert_almost_equal(results[2][0], results[2][1])
def test_shufflenet_forward_only(self):
results = utils.test_forward_only(
shufflenet.create_shufflenet,
'gpu_0/last_out_L1000'
)
self.assertTrue(results[0][0] < results[0][1])
self.assertTrue(results[1] < 10 and results[1] > 0)
np.testing.assert_almost_equal(results[2][0], results[2][1])
def test_shufflenet_forward_only_fast_simplenet(self):
'''
Test C++ memonger that is only for simple nets
'''
results = utils.test_forward_only_fast_simplenet(
shufflenet.create_shufflenet,
'gpu_0/last_out_L1000'
)
self.assertTrue(results[0][0] < results[0][1])
self.assertTrue(results[1] < 4 and results[1] > 0)
np.testing.assert_almost_equal(results[2][0], results[2][1])
if __name__ == "__main__":
import unittest
import random
random.seed(2006)
    from caffe2.python import workspace
    workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_gpu_memory_tracking=1'])
unittest.main()
|
pytorch-master
|
caffe2/python/models/shufflenet_test.py
|
import numpy as np
import time
from caffe2.python import workspace, cnn, memonger, core
def has_blob(proto, needle):
for op in proto.op:
for inp in op.input:
if inp == needle:
return True
for outp in op.output:
if outp == needle:
return True
return False
def count_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len(blobs)
def count_shared_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len([b for b in blobs if "_shared" in b])
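# Hypothetical usage sketch for the helpers above (the tiny net is illustrative):
#
#   from caffe2.python import core
#   net = core.Net("example")
#   net.Relu(["x"], ["y_shared"])
#   proto = net.Proto()
#   has_blob(proto, "x")          # True
#   count_blobs(proto)            # 2 ("x" and "y_shared")
#   count_shared_blobs(proto)     # 1 (only "y_shared" contains "_shared")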
def test_shared_grads(
with_shapes,
create_model,
conv_blob,
last_out_blob,
data_blob='gpu_0/data',
label_blob='gpu_0/label',
num_labels=1000,
):
model = cnn.CNNModelHelper(
order="NCHW",
name="test",
cudnn_exhaustive_search=True,
)
with core.NameScope("gpu_0"):
data = model.net.AddExternalInput(data_blob)
label = model.net.AddExternalInput(label_blob)
(_softmax, loss) = create_model(
model,
data,
num_input_channels=3,
num_labels=num_labels,
label=label,
is_test=False,
)
param_to_grad = model.AddGradientOperators([loss])
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
{data_blob: [4, 3, 227, 227],
label_blob: [4]},
)
count_before = count_blobs(model.net.Proto())
optim_proto = memonger.share_grad_blobs(
model.net,
["gpu_0/loss"],
set(model.param_to_grad.values()),
"gpu_0/",
share_activations=True,
dont_share_blobs=set([str(param_to_grad[conv_blob])]),
blob_shapes=shapes if with_shapes else None,
)
count_after = count_blobs(optim_proto)
    # Run the model and compare results. We check that the loss is the same
    # and also that the final gradient (conv1_w_grad) is the same.
workspace.RunNetOnce(model.param_init_net)
data = np.random.rand(4, 3, 227, 227).astype(np.float32)
label = (np.random.rand(4) * num_labels).astype(np.int32)
workspace.FeedBlob(data_blob, data)
workspace.FeedBlob(label_blob, label)
workspace.RunNetOnce(model.net)
model.net.Proto().type = 'dag'
model.net.Proto().num_workers = 4
loss1 = workspace.FetchBlob(last_out_blob)
conv1_w_grad = workspace.FetchBlob(param_to_grad[conv_blob])
workspace.FeedBlob(param_to_grad[conv_blob], np.array([0.0]))
workspace.RunNetOnce(optim_proto)
optimized_loss1 = workspace.FetchBlob(last_out_blob)
optim_conv1_w_grad = workspace.FetchBlob(param_to_grad[conv_blob])
return [(count_after, count_before),
(loss1, optimized_loss1),
(conv1_w_grad, optim_conv1_w_grad)]
def test_forward_only(
create_model,
last_out_blob,
data_blob='gpu_0/data',
num_labels=1000,
):
model = cnn.CNNModelHelper(
order="NCHW",
name="test",
cudnn_exhaustive_search=True,
)
with core.NameScope("gpu_0"):
data = model.net.AddExternalInput(data_blob)
create_model(
model,
data,
num_input_channels=3,
num_labels=num_labels,
is_test=True
)
count_before = count_blobs(model.net.Proto())
optim_proto = memonger.optimize_inference_for_dag(
model.net, [data_blob], "gpu_0/"
)
count_after = count_blobs(optim_proto)
num_shared_blobs = count_shared_blobs(optim_proto)
# Run model and compare results
workspace.RunNetOnce(model.param_init_net)
data = np.random.rand(4, 3, 227, 227).astype(np.float32)
workspace.FeedBlob(data_blob, data)
workspace.RunNetOnce(model.net)
model.net.Proto().type = 'dag'
model.net.Proto().num_workers = 4
loss1 = workspace.FetchBlob(last_out_blob)
workspace.RunNetOnce(optim_proto)
optimized_loss1 = workspace.FetchBlob(last_out_blob)
return [(count_after, count_before),
            num_shared_blobs,
(loss1, optimized_loss1)]
def test_forward_only_fast_simplenet(
create_model,
last_out_blob,
data_blob="gpu_0/data",
num_labels=1000,
):
model = cnn.CNNModelHelper(
order="NCHW",
name="test",
cudnn_exhaustive_search=True,
)
with core.NameScope("gpu_0"):
data = model.net.AddExternalInput(data_blob)
create_model(
model,
data,
num_input_channels=3,
num_labels=num_labels,
is_test=True
)
count_before = count_blobs(model.net.Proto())
t = time.time()
optim_proto = memonger.optimize_inference_fast(
model.net.Proto(),
set([data_blob, last_out_blob]).union(
set(model.net.Proto().external_input))
)
print("Optimization took {} secs".format(time.time() - t))
count_after = count_blobs(optim_proto)
num_shared_blobs = count_shared_blobs(optim_proto)
print(count_after, count_before, num_shared_blobs)
# Run model and compare results
workspace.RunNetOnce(model.param_init_net)
data = np.random.rand(4, 3, 227, 227).astype(np.float32)
workspace.FeedBlob(data_blob, data)
model.net.Proto().type = 'simple'
workspace.RunNetOnce(model.net)
loss1 = workspace.FetchBlob(last_out_blob)
workspace.RunNetOnce(optim_proto)
optimized_loss1 = workspace.FetchBlob(last_out_blob)
return [(count_after, count_before),
            num_shared_blobs,
(loss1, optimized_loss1)]
|
pytorch-master
|
caffe2/python/models/imagenet_trainer_test_utils.py
|
## @package download
# Module caffe2.python.models.download
import argparse
import os
import sys
import signal
import re
import json
from caffe2.proto import caffe2_pb2
# Import urllib
from urllib.error import HTTPError, URLError
import urllib.request as urllib
# urllib requires more work to deal with a redirect, so not using vanity url
DOWNLOAD_BASE_URL = "https://s3.amazonaws.com/download.caffe2.ai/models/"
DOWNLOAD_COLUMNS = 70
# Allow big downloads to be interrupted cleanly with Ctrl-C
def signalHandler(signal, frame):
print("Killing download...")
exit(0)
signal.signal(signal.SIGINT, signalHandler)
def deleteDirectory(top_dir):
for root, dirs, files in os.walk(top_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top_dir)
def progressBar(percentage):
full = int(DOWNLOAD_COLUMNS * percentage / 100)
bar = full * "#" + (DOWNLOAD_COLUMNS - full) * " "
sys.stdout.write(u"\u001b[1000D[" + bar + "] " + str(percentage) + "%")
sys.stdout.flush()
def downloadFromURLToFile(url, filename, show_progress=True):
try:
print("Downloading from {url}".format(url=url))
response = urllib.urlopen(url)
size = int(response.info().get('Content-Length').strip())
chunk = min(size, 8192)
print("Writing to {filename}".format(filename=filename))
if show_progress:
downloaded_size = 0
progressBar(0)
with open(filename, "wb") as local_file:
while True:
data_chunk = response.read(chunk)
if not data_chunk:
break
local_file.write(data_chunk)
if show_progress:
downloaded_size += len(data_chunk)
progressBar(int(100 * downloaded_size / size))
print("") # New line to fix for progress bar
except HTTPError as e:
raise Exception("Could not download model. [HTTP Error] {code}: {reason}."
.format(code=e.code, reason=e.reason))
except URLError as e:
raise Exception("Could not download model. [URL Error] {reason}."
.format(reason=e.reason))
def getURLFromName(name, filename):
return "{base_url}{name}/{filename}".format(base_url=DOWNLOAD_BASE_URL,
name=name, filename=filename)
def downloadModel(model, args):
# Figure out where to store the model
model_folder = '{folder}'.format(folder=model)
dir_path = os.path.dirname(os.path.realpath(__file__))
if args.install:
model_folder = '{dir_path}/{folder}'.format(dir_path=dir_path,
folder=model)
# Check if that folder is already there
if os.path.exists(model_folder) and not os.path.isdir(model_folder):
if not args.force:
raise Exception("Cannot create folder for storing the model,\
there exists a file of the same name.")
else:
print("Overwriting existing file! ({filename})"
.format(filename=model_folder))
os.remove(model_folder)
if os.path.isdir(model_folder):
if not args.force:
response = ""
query = "Model already exists, continue? [y/N] "
try:
response = raw_input(query)
except NameError:
response = input(query)
if response.upper() == 'N' or not response:
print("Cancelling download...")
exit(0)
print("Overwriting existing folder! ({filename})".format(filename=model_folder))
deleteDirectory(model_folder)
# Now we can safely create the folder and download the model
os.makedirs(model_folder)
for f in ['predict_net.pb', 'init_net.pb']:
try:
downloadFromURLToFile(getURLFromName(model, f),
'{folder}/{f}'.format(folder=model_folder,
f=f))
except Exception as e:
print("Abort: {reason}".format(reason=str(e)))
print("Cleaning up...")
deleteDirectory(model_folder)
exit(0)
if args.install:
os.symlink("{folder}/__sym_init__.py".format(folder=dir_path),
"{folder}/__init__.py".format(folder=model_folder))
def validModelName(name):
invalid_names = ['__init__']
if name in invalid_names:
return False
if not re.match("^[/0-9a-zA-Z_-]+$", name):
return False
return True
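# A few hypothetical inputs and what validModelName returns for them:
#
#   validModelName("squeezenet")       # True
#   validModelName("style-transfer")   # True  (letters, digits, '/', '_' and '-' are allowed)
#   validModelName("__init__")         # False (reserved name)
#   validModelName("bad name!")        # False (spaces and '!' are rejected)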
class ModelDownloader:
def __init__(self, model_env_name='CAFFE2_MODELS'):
self.model_env_name = model_env_name
def _model_dir(self, model):
caffe2_home = os.path.expanduser(os.getenv('CAFFE2_HOME', '~/.caffe2'))
models_dir = os.getenv(self.model_env_name, os.path.join(caffe2_home, 'models'))
return os.path.join(models_dir, model)
def _download(self, model):
model_dir = self._model_dir(model)
assert not os.path.exists(model_dir)
os.makedirs(model_dir)
for f in ['predict_net.pb', 'init_net.pb', 'value_info.json']:
url = getURLFromName(model, f)
dest = os.path.join(model_dir, f)
try:
downloadFromURLToFile(url, dest, show_progress=False)
except TypeError:
# show_progress not supported prior to
# Caffe2 78c014e752a374d905ecfb465d44fa16e02a28f1
# (Sep 17, 2017)
downloadFromURLToFile(url, dest)
except Exception:
deleteDirectory(model_dir)
raise
    # This version returns an extra debug_str value that helps to understand
    # why our work sometimes fails in sandcastle.
def get_c2_model_dbg(self, model_name):
debug_str = "get_c2_model debug:\n"
model_dir = self._model_dir(model_name)
if not os.path.exists(model_dir):
self._download(model_name)
c2_predict_pb = os.path.join(model_dir, 'predict_net.pb')
debug_str += "c2_predict_pb path: " + c2_predict_pb + "\n"
c2_predict_net = caffe2_pb2.NetDef()
with open(c2_predict_pb, 'rb') as f:
len_read = c2_predict_net.ParseFromString(f.read())
debug_str += "c2_predict_pb ParseFromString = " + str(len_read) + "\n"
c2_predict_net.name = model_name
c2_init_pb = os.path.join(model_dir, 'init_net.pb')
debug_str += "c2_init_pb path: " + c2_init_pb + "\n"
c2_init_net = caffe2_pb2.NetDef()
with open(c2_init_pb, 'rb') as f:
len_read = c2_init_net.ParseFromString(f.read())
debug_str += "c2_init_pb ParseFromString = " + str(len_read) + "\n"
c2_init_net.name = model_name + '_init'
with open(os.path.join(model_dir, 'value_info.json')) as f:
value_info = json.load(f)
return c2_init_net, c2_predict_net, value_info, debug_str
def get_c2_model(self, model_name):
init_net, predict_net, value_info, _ = self.get_c2_model_dbg(model_name)
return init_net, predict_net, value_info
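# Hypothetical usage sketch for ModelDownloader; "squeezenet" is only an example
# name and must actually exist under DOWNLOAD_BASE_URL for the download to work:
#
#   downloader = ModelDownloader()
#   init_net, predict_net, value_info = downloader.get_c2_model("squeezenet")
#   # init_net and predict_net are caffe2_pb2.NetDef protos; value_info describes
#   # the expected input blobs (parsed from value_info.json).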
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Download or install pretrained models.')
parser.add_argument('model', nargs='+',
help='Model to download/install.')
parser.add_argument('-i', '--install', action='store_true',
help='Install the model.')
parser.add_argument('-f', '--force', action='store_true',
help='Force a download/installation.')
args = parser.parse_args()
for model in args.model:
if validModelName(model):
downloadModel(model, args)
else:
print("'{}' is not a valid model name.".format(model))
|
pytorch-master
|
caffe2/python/models/download.py
|
pytorch-master
|
caffe2/python/models/__init__.py
|
|
# Module caffe2.python.models.shufflenet
from caffe2.python import brew
"""
Utility for creating ShuffleNet
"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" by Ma et al., 2018
"""
OUTPUT_CHANNELS = {
'0.5x': [24, 48, 96, 192, 1024],
'1.0x': [24, 116, 232, 464, 1024],
'1.5x': [24, 176, 352, 704, 1024],
'2.0x': [24, 244, 488, 976, 2048],
}
class ShuffleNetV2Builder():
def __init__(
self,
model,
data,
num_input_channels,
num_labels,
num_groups=2,
width='1.0x',
is_test=False,
detection=False,
bn_epsilon=1e-5,
):
self.model = model
self.prev_blob = data
self.num_input_channels = num_input_channels
self.num_labels = num_labels
self.num_groups = num_groups
self.output_channels = OUTPUT_CHANNELS[width]
self.stage_repeats = [3, 7, 3]
self.is_test = is_test
self.detection = detection
self.bn_epsilon = bn_epsilon
def create(self):
in_channels = self.output_channels[0]
self.prev_blob = brew.conv(self.model, self.prev_blob, 'stage1_conv',
self.num_input_channels, in_channels,
weight_init=("MSRAFill", {}),
kernel=3, stride=2)
self.prev_blob = brew.max_pool(self.model, self.prev_blob,
'stage1_pool', kernel=3, stride=2)
# adds stage#{2,3,4}; see table 5 of the ShufflenetV2 paper.
for idx, (out_channels, n_repeats) in enumerate(zip(
self.output_channels[1:4], self.stage_repeats
)):
prefix = 'stage{}_stride{}'.format(idx + 2, 2)
self.add_spatial_ds_unit(prefix, in_channels, out_channels)
in_channels = out_channels
for i in range(n_repeats):
prefix = 'stage{}_stride{}_repeat{}'.format(
idx + 2, 1, i + 1
)
self.add_basic_unit(prefix, in_channels)
self.last_conv = brew.conv(self.model, self.prev_blob, 'conv5',
in_channels, self.output_channels[4],
kernel=1)
self.avg_pool = self.model.AveragePool(self.last_conv, 'avg_pool',
kernel=7)
self.last_out = brew.fc(self.model,
self.avg_pool,
'last_out_L{}'.format(self.num_labels),
self.output_channels[4],
self.num_labels)
# spatial down sampling unit with stride=2
def add_spatial_ds_unit(self, prefix, in_channels, out_channels, stride=2):
right = left = self.prev_blob
out_channels = out_channels // 2
# Enlarge the receptive field for detection task
if self.detection:
left = self.add_detection_unit(left, prefix + '_left_detection',
in_channels, in_channels)
left = self.add_dwconv3x3_bn(left, prefix + 'left_dwconv',
in_channels, stride)
left = self.add_conv1x1_bn(left, prefix + '_left_conv1', in_channels,
out_channels)
if self.detection:
right = self.add_detection_unit(right, prefix + '_right_detection',
in_channels, in_channels)
right = self.add_conv1x1_bn(right, prefix + '_right_conv1',
in_channels, out_channels)
right = self.add_dwconv3x3_bn(right, prefix + '_right_dwconv',
out_channels, stride)
right = self.add_conv1x1_bn(right, prefix + '_right_conv2',
out_channels, out_channels)
self.prev_blob = brew.concat(self.model, [right, left],
prefix + '_concat')
self.prev_blob = self.model.net.ChannelShuffle(
self.prev_blob, prefix + '_ch_shuffle',
group=self.num_groups, kernel=1
)
# basic unit with stride=1
def add_basic_unit(self, prefix, in_channels, stride=1):
in_channels = in_channels // 2
left = prefix + '_left'
right = prefix + '_right'
self.model.net.Split(self.prev_blob, [left, right])
if self.detection:
right = self.add_detection_unit(right, prefix + '_right_detection',
in_channels, in_channels)
right = self.add_conv1x1_bn(right, prefix + '_right_conv1',
in_channels, in_channels)
right = self.add_dwconv3x3_bn(right, prefix + '_right_dwconv',
in_channels, stride)
right = self.add_conv1x1_bn(right, prefix + '_right_conv2',
in_channels, in_channels)
self.prev_blob = brew.concat(self.model, [right, left],
prefix + '_concat')
self.prev_blob = self.model.net.ChannelShuffle(
self.prev_blob, prefix + '_ch_shuffle',
group=self.num_groups, kernel=1
)
# helper functions to create net's units
def add_detection_unit(self, prev_blob, prefix, in_channels, out_channels,
kernel=3, pad=1):
out_blob = brew.conv(self.model, prev_blob, prefix + '_conv',
in_channels, out_channels, kernel=kernel,
weight_init=("MSRAFill", {}),
group=in_channels, pad=pad)
out_blob = brew.spatial_bn(self.model, out_blob, prefix + '_bn',
out_channels, epsilon=self.bn_epsilon,
is_test=self.is_test)
return out_blob
def add_conv1x1_bn(self, prev_blob, blob, in_channels, out_channels):
prev_blob = brew.conv(self.model, prev_blob, blob, in_channels,
out_channels, kernel=1,
weight_init=("MSRAFill", {}))
prev_blob = brew.spatial_bn(self.model, prev_blob, prev_blob + '_bn',
out_channels,
epsilon=self.bn_epsilon,
is_test=self.is_test)
prev_blob = brew.relu(self.model, prev_blob, prev_blob)
return prev_blob
def add_dwconv3x3_bn(self, prev_blob, blob, channels, stride):
prev_blob = brew.conv(self.model, prev_blob, blob, channels,
channels, kernel=3,
weight_init=("MSRAFill", {}),
stride=stride, group=channels, pad=1)
prev_blob = brew.spatial_bn(self.model, prev_blob,
prev_blob + '_bn',
channels,
epsilon=self.bn_epsilon,
is_test=self.is_test)
return prev_blob
def create_shufflenet(
model,
data,
num_input_channels,
num_labels,
label=None,
is_test=False,
no_loss=False,
):
builder = ShuffleNetV2Builder(model, data, num_input_channels,
num_labels,
is_test=is_test)
builder.create()
if no_loss:
return builder.last_out
if (label is not None):
(softmax, loss) = model.SoftmaxWithLoss(
[builder.last_out, label],
["softmax", "loss"],
)
return (softmax, loss)
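# Minimal usage sketch (assumes NCHW input of spatial size 224x224 so that the
# final 7x7 average pool matches; blob names are illustrative):
#
#   from caffe2.python import model_helper
#   model = model_helper.ModelHelper(name="shufflenet_example")
#   data = model.net.AddExternalInput("data")     # shape [N, 3, 224, 224]
#   label = model.net.AddExternalInput("label")   # shape [N], int32 class ids
#   softmax, loss = create_shufflenet(
#       model, data, num_input_channels=3, num_labels=1000, label=label)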
|
pytorch-master
|
caffe2/python/models/shufflenet.py
|
## @package resnet
# Module caffe2.python.models.resnet
from caffe2.python import brew
import logging
'''
Utility for creating ResNe(X)t
"Deep Residual Learning for Image Recognition" by He, Zhang et. al. 2015
"Aggregated Residual Transformations for Deep Neural Networks" by Xie et. al. 2016
'''
class ResNetBuilder():
'''
Helper class for constructing residual blocks.
'''
def __init__(
self,
model,
prev_blob,
no_bias,
is_test,
bn_epsilon=1e-5,
bn_momentum=0.9,
):
self.model = model
self.comp_count = 0
self.comp_idx = 0
self.prev_blob = prev_blob
self.is_test = is_test
self.bn_epsilon = bn_epsilon
self.bn_momentum = bn_momentum
self.no_bias = 1 if no_bias else 0
def add_conv(
self,
in_filters,
out_filters,
kernel,
stride=1,
group=1,
pad=0,
):
self.comp_idx += 1
self.prev_blob = brew.conv(
self.model,
self.prev_blob,
'comp_%d_conv_%d' % (self.comp_count, self.comp_idx),
in_filters,
out_filters,
weight_init=("MSRAFill", {}),
kernel=kernel,
stride=stride,
group=group,
pad=pad,
no_bias=self.no_bias,
)
return self.prev_blob
def add_relu(self):
self.prev_blob = brew.relu(
self.model,
self.prev_blob,
self.prev_blob, # in-place
)
return self.prev_blob
def add_spatial_bn(self, num_filters):
self.prev_blob = brew.spatial_bn(
self.model,
self.prev_blob,
'comp_%d_spatbn_%d' % (self.comp_count, self.comp_idx),
num_filters,
epsilon=self.bn_epsilon,
momentum=self.bn_momentum,
is_test=self.is_test,
)
return self.prev_blob
'''
Add a "bottleneck" component as described in He et. al. Figure 3 (right)
'''
def add_bottleneck(
self,
input_filters, # num of feature maps from preceding layer
base_filters, # num of filters internally in the component
output_filters, # num of feature maps to output
stride=1,
group=1,
spatial_batch_norm=True,
):
self.comp_idx = 0
shortcut_blob = self.prev_blob
# 1x1
self.add_conv(
input_filters,
base_filters,
kernel=1,
stride=1,
)
if spatial_batch_norm:
self.add_spatial_bn(base_filters)
self.add_relu()
# 3x3 (note the pad, required for keeping dimensions)
self.add_conv(
base_filters,
base_filters,
kernel=3,
stride=stride,
group=group,
pad=1,
)
if spatial_batch_norm:
self.add_spatial_bn(base_filters)
self.add_relu()
# 1x1
last_conv = self.add_conv(base_filters, output_filters, kernel=1)
if spatial_batch_norm:
last_conv = self.add_spatial_bn(output_filters)
# Summation with input signal (shortcut)
# When the number of feature maps mismatch between the input
# and output (this usually happens when the residual stage
# changes), we need to do a projection for the short cut
if output_filters != input_filters:
shortcut_blob = brew.conv(
self.model,
shortcut_blob,
'shortcut_projection_%d' % self.comp_count,
input_filters,
output_filters,
weight_init=("MSRAFill", {}),
kernel=1,
stride=stride,
no_bias=self.no_bias,
)
if spatial_batch_norm:
shortcut_blob = brew.spatial_bn(
self.model,
shortcut_blob,
'shortcut_projection_%d_spatbn' % self.comp_count,
output_filters,
epsilon=self.bn_epsilon,
momentum=self.bn_momentum,
is_test=self.is_test,
)
self.prev_blob = brew.sum(
self.model, [shortcut_blob, last_conv],
'comp_%d_sum_%d' % (self.comp_count, self.comp_idx)
)
self.comp_idx += 1
self.add_relu()
        # Keep track of the number of high-level components in this ResNetBuilder
self.comp_count += 1
return output_filters
def add_simple_block(
self,
input_filters,
num_filters,
down_sampling=False,
spatial_batch_norm=True
):
self.comp_idx = 0
shortcut_blob = self.prev_blob
# 3x3
self.add_conv(
input_filters,
num_filters,
kernel=3,
stride=(1 if down_sampling is False else 2),
pad=1
)
if spatial_batch_norm:
self.add_spatial_bn(num_filters)
self.add_relu()
last_conv = self.add_conv(num_filters, num_filters, kernel=3, pad=1)
if spatial_batch_norm:
last_conv = self.add_spatial_bn(num_filters)
# Increase of dimensions, need a projection for the shortcut
if (num_filters != input_filters):
shortcut_blob = brew.conv(
self.model,
shortcut_blob,
'shortcut_projection_%d' % self.comp_count,
input_filters,
num_filters,
weight_init=("MSRAFill", {}),
kernel=1,
stride=(1 if down_sampling is False else 2),
no_bias=self.no_bias,
)
if spatial_batch_norm:
shortcut_blob = brew.spatial_bn(
self.model,
shortcut_blob,
'shortcut_projection_%d_spatbn' % self.comp_count,
num_filters,
epsilon=1e-3,
is_test=self.is_test,
)
self.prev_blob = brew.sum(
self.model, [shortcut_blob, last_conv],
'comp_%d_sum_%d' % (self.comp_count, self.comp_idx)
)
self.comp_idx += 1
self.add_relu()
        # Keep track of the number of high-level components in this ResNetBuilder
self.comp_count += 1
def create_resnet_32x32(
model, data, num_input_channels, num_groups, num_labels, is_test=False
):
'''
    Create residual net for smaller images (sec 4.2 of He et al. (2015))
num_groups = 'n' in the paper
'''
# conv1 + maxpool
brew.conv(
model, data, 'conv1', num_input_channels, 16, kernel=3, stride=1
)
brew.spatial_bn(
model, 'conv1', 'conv1_spatbn', 16, epsilon=1e-3, is_test=is_test
)
brew.relu(model, 'conv1_spatbn', 'relu1')
# Number of blocks as described in sec 4.2
filters = [16, 32, 64]
builder = ResNetBuilder(model, 'relu1', no_bias=0, is_test=is_test)
prev_filters = 16
for groupidx in range(0, 3):
for blockidx in range(0, 2 * num_groups):
builder.add_simple_block(
prev_filters if blockidx == 0 else filters[groupidx],
filters[groupidx],
down_sampling=(True if blockidx == 0 and
groupidx > 0 else False))
prev_filters = filters[groupidx]
# Final layers
brew.average_pool(
model, builder.prev_blob, 'final_avg', kernel=8, stride=1
)
brew.fc(model, 'final_avg', 'last_out', 64, num_labels)
softmax = brew.softmax(model, 'last_out', 'softmax')
return softmax
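# Minimal usage sketch for the 32x32 variant (CIFAR-style input and blob names
# are assumed):
#
#   from caffe2.python import model_helper
#   model = model_helper.ModelHelper(name="resnet_cifar_example")
#   data = model.net.AddExternalInput("data")   # shape [N, 3, 32, 32]
#   softmax = create_resnet_32x32(
#       model, data, num_input_channels=3, num_groups=3, num_labels=10)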
RESNEXT_BLOCK_CONFIG = {
18: (2, 2, 2, 2),
34: (3, 4, 6, 3),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
200: (3, 24, 36, 3),
}
RESNEXT_STRIDES = [1, 2, 2, 2]
logging.basicConfig()
log = logging.getLogger("resnext_builder")
log.setLevel(logging.DEBUG)
# The conv1 and final_avg kernel/stride args provide a basic mechanism for
# adapting resnet50 for different sizes of input images.
def create_resnext(
model,
data,
num_input_channels,
num_labels,
num_layers,
num_groups,
num_width_per_group,
label=None,
is_test=False,
no_loss=False,
no_bias=1,
conv1_kernel=7,
conv1_stride=2,
final_avg_kernel=7,
log=None,
bn_epsilon=1e-5,
bn_momentum=0.9,
):
if num_layers not in RESNEXT_BLOCK_CONFIG:
log.error("{}-layer is invalid for resnext config".format(num_layers))
num_blocks = RESNEXT_BLOCK_CONFIG[num_layers]
strides = RESNEXT_STRIDES
num_filters = [64, 256, 512, 1024, 2048]
if num_layers in [18, 34]:
num_filters = [64, 64, 128, 256, 512]
# the number of features before the last FC layer
num_features = num_filters[-1]
# conv1 + maxpool
conv_blob = brew.conv(
model,
data,
'conv1',
num_input_channels,
num_filters[0],
weight_init=("MSRAFill", {}),
kernel=conv1_kernel,
stride=conv1_stride,
pad=3,
no_bias=no_bias
)
bn_blob = brew.spatial_bn(
model,
conv_blob,
'conv1_spatbn_relu',
num_filters[0],
epsilon=bn_epsilon,
momentum=bn_momentum,
is_test=is_test
)
relu_blob = brew.relu(model, bn_blob, bn_blob)
max_pool = brew.max_pool(model, relu_blob, 'pool1', kernel=3, stride=2, pad=1)
# Residual blocks...
builder = ResNetBuilder(model, max_pool, no_bias=no_bias,
is_test=is_test, bn_epsilon=1e-5, bn_momentum=0.9)
inner_dim = num_groups * num_width_per_group
# 4 different kinds of residual blocks
for residual_idx in range(4):
residual_num = num_blocks[residual_idx]
residual_stride = strides[residual_idx]
dim_in = num_filters[residual_idx]
for blk_idx in range(residual_num):
dim_in = builder.add_bottleneck(
dim_in,
inner_dim,
num_filters[residual_idx + 1], # dim out
stride=residual_stride if blk_idx == 0 else 1,
group=num_groups,
)
inner_dim *= 2
# Final layers
final_avg = brew.average_pool(
model,
builder.prev_blob,
'final_avg',
kernel=final_avg_kernel,
stride=1,
global_pooling=True,
)
# Final dimension of the "image" is reduced to 7x7
last_out = brew.fc(
model, final_avg, 'last_out_L{}'.format(num_labels), num_features, num_labels
)
if no_loss:
return last_out
# If we create model for training, use softmax-with-loss
if (label is not None):
(softmax, loss) = model.SoftmaxWithLoss(
[last_out, label],
["softmax", "loss"],
)
return (softmax, loss)
else:
# For inference, we just return softmax
return brew.softmax(model, last_out, "softmax")
# The conv1 and final_avg kernel/stride args provide a basic mechanism for
# adapting resnet50 for different sizes of input images.
def create_resnet50(
model,
data,
num_input_channels,
num_labels,
label=None,
is_test=False,
no_loss=False,
no_bias=0,
conv1_kernel=7,
conv1_stride=2,
final_avg_kernel=7,
):
# resnet50 is a special case for ResNeXt50-1x64d
return create_resnext(
model,
data,
num_input_channels,
num_labels,
num_layers=50,
num_groups=1,
num_width_per_group=64,
label=label,
is_test=is_test,
no_loss=no_loss,
no_bias=no_bias,
conv1_kernel=conv1_kernel,
conv1_stride=conv1_stride,
final_avg_kernel=final_avg_kernel,
)
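# Minimal usage sketch (blob names are illustrative; with the default conv1 and
# final_avg kernels the network expects 224x224 NCHW input):
#
#   from caffe2.python import model_helper
#   model = model_helper.ModelHelper(name="resnet50_example")
#   data = model.net.AddExternalInput("data")   # shape [N, 3, 224, 224]
#   softmax = create_resnet50(
#       model, data, num_input_channels=3, num_labels=1000, is_test=True)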
|
pytorch-master
|
caffe2/python/models/resnet.py
|
import os
from caffe2.proto import caffe2_pb2
def _parseFile(filename):
out_net = caffe2_pb2.NetDef()
# TODO(bwasti): A more robust handler for pathnames.
dir_path = os.path.dirname(__file__)
with open('{dir_path}/{filename}'.format(dir_path=dir_path,
filename=filename), 'rb') as f:
out_net.ParseFromString(f.read())
return out_net
init_net = _parseFile('init_net.pb')
predict_net = _parseFile('predict_net.pb')
|
pytorch-master
|
caffe2/python/models/__sym_init__.py
|
import numpy as np
import caffe2.python.models.resnet as resnet
import hypothesis.strategies as st
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.models.imagenet_trainer_test_utils as utils
class ResnetMemongerTest(hu.HypothesisTestCase):
@given(with_shapes=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=2, deadline=None)
def test_resnet_shared_grads(self, with_shapes, gc, dc):
results = utils.test_shared_grads(
with_shapes,
resnet.create_resnet50,
'gpu_0/conv1_w',
'gpu_0/last_out_L1000'
)
self.assertTrue(results[0][0] < results[0][1])
np.testing.assert_almost_equal(results[1][0], results[1][1])
np.testing.assert_almost_equal(results[2][0], results[2][1])
def test_resnet_forward_only(self):
results = utils.test_forward_only(
resnet.create_resnet50,
'gpu_0/last_out_L1000'
)
self.assertTrue(results[0][0] < results[0][1])
self.assertTrue(results[1] < 7 and results[1] > 0)
np.testing.assert_almost_equal(results[2][0], results[2][1])
def test_resnet_forward_only_fast_simplenet(self):
'''
Test C++ memonger that is only for simple nets
'''
results = utils.test_forward_only_fast_simplenet(
resnet.create_resnet50,
'gpu_0/last_out_L1000'
)
self.assertTrue(results[0][0] < results[0][1])
self.assertTrue(results[1] < 4 and results[1] > 0)
np.testing.assert_almost_equal(results[2][0], results[2][1])
if __name__ == "__main__":
import unittest
import random
random.seed(2603)
    from caffe2.python import workspace
    workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_gpu_memory_tracking=1'])
unittest.main()
|
pytorch-master
|
caffe2/python/models/resnet_test.py
|
## @package translate
# Module caffe2.python.models.seq2seq.translate
from abc import ABCMeta, abstractmethod
import argparse
from future.utils import viewitems
import logging
import numpy as np
import sys
from caffe2.python import core, rnn_cell, workspace
from caffe2.python.models.seq2seq.beam_search import BeamSearchForwardOnly
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
def _weighted_sum(model, values, weight, output_name):
values_weights = zip(values, [weight] * len(values))
values_weights_flattened = [x for v_w in values_weights for x in v_w]
return model.net.WeightedSum(
values_weights_flattened,
output_name,
)
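# For example, _weighted_sum(model, ['a', 'b'], 'w', 'out') emits
# WeightedSum(['a', 'w', 'b', 'w'], 'out'): every value blob is paired with the
# shared scalar weight blob before the sum is taken.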
class Seq2SeqModelCaffe2EnsembleDecoderBase(metaclass=ABCMeta):
@abstractmethod
def get_model_file(self, model):
pass
@abstractmethod
def get_db_type(self):
pass
def build_word_rewards(self, vocab_size, word_reward, unk_reward):
word_rewards = np.full([vocab_size], word_reward, dtype=np.float32)
word_rewards[seq2seq_util.PAD_ID] = 0
word_rewards[seq2seq_util.GO_ID] = 0
word_rewards[seq2seq_util.EOS_ID] = 0
word_rewards[seq2seq_util.UNK_ID] = word_reward + unk_reward
return word_rewards
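    # Worked example: with vocab_size=5, word_reward=-0.5 and unk_reward=-1.0 this
    # returns [0, 0, 0, -1.5, -0.5]; PAD/GO/EOS earn nothing, UNK gets the combined
    # penalty, and every other token gets the plain word_reward.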
def load_models(self):
db_reader = 'reader'
for model, scope_name in zip(
self.models,
self.decoder_scope_names,
):
params_for_current_model = [
param
for param in self.model.GetAllParams()
if str(param).startswith(scope_name)
]
assert workspace.RunOperatorOnce(core.CreateOperator(
'CreateDB',
[], [db_reader],
db=self.get_model_file(model),
db_type=self.get_db_type())
), 'Failed to create db {}'.format(self.get_model_file(model))
assert workspace.RunOperatorOnce(core.CreateOperator(
'Load',
[db_reader],
params_for_current_model,
load_all=1,
add_prefix=scope_name + '/',
strip_prefix='gpu_0/',
))
logger.info('Model {} is loaded from a checkpoint {}'.format(
scope_name, self.get_model_file(model)))
class Seq2SeqModelCaffe2EnsembleDecoder(Seq2SeqModelCaffe2EnsembleDecoderBase):
def get_model_file(self, model):
return model['model_file']
def get_db_type(self):
return 'minidb'
def scope(self, scope_name, blob_name):
return (
scope_name + '/' + blob_name
if scope_name is not None
else blob_name
)
def _build_decoder(
self,
model,
step_model,
model_params,
scope,
previous_tokens,
timestep,
fake_seq_lengths,
):
attention_type = model_params['attention']
assert attention_type in ['none', 'regular']
use_attention = (attention_type != 'none')
with core.NameScope(scope):
encoder_embeddings = seq2seq_util.build_embeddings(
model=model,
vocab_size=self.source_vocab_size,
embedding_size=model_params['encoder_embedding_size'],
name='encoder_embeddings',
freeze_embeddings=False,
)
(
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
) = seq2seq_util.build_embedding_encoder(
model=model,
encoder_params=model_params['encoder_type'],
num_decoder_layers=len(model_params['decoder_layer_configs']),
inputs=self.encoder_inputs,
input_lengths=self.encoder_lengths,
vocab_size=self.source_vocab_size,
embeddings=encoder_embeddings,
embedding_size=model_params['encoder_embedding_size'],
use_attention=use_attention,
num_gpus=0,
forward_only=True,
scope=scope,
)
with core.NameScope(scope):
if use_attention:
# [max_source_length, beam_size, encoder_output_dim]
encoder_outputs = model.net.Tile(
encoder_outputs,
'encoder_outputs_tiled',
tiles=self.beam_size,
axis=1,
)
if weighted_encoder_outputs is not None:
weighted_encoder_outputs = model.net.Tile(
weighted_encoder_outputs,
'weighted_encoder_outputs_tiled',
tiles=self.beam_size,
axis=1,
)
decoder_embeddings = seq2seq_util.build_embeddings(
model=model,
vocab_size=self.target_vocab_size,
embedding_size=model_params['decoder_embedding_size'],
name='decoder_embeddings',
freeze_embeddings=False,
)
embedded_tokens_t_prev = step_model.net.Gather(
[decoder_embeddings, previous_tokens],
'embedded_tokens_t_prev',
)
decoder_cells = []
decoder_units_per_layer = []
for i, layer_config in enumerate(model_params['decoder_layer_configs']):
num_units = layer_config['num_units']
decoder_units_per_layer.append(num_units)
if i == 0:
input_size = model_params['decoder_embedding_size']
else:
input_size = (
model_params['decoder_layer_configs'][i - 1]['num_units']
)
cell = rnn_cell.LSTMCell(
forward_only=True,
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
)
decoder_cells.append(cell)
with core.NameScope(scope):
if final_encoder_hidden_states is not None:
for i in range(len(final_encoder_hidden_states)):
if final_encoder_hidden_states[i] is not None:
final_encoder_hidden_states[i] = model.net.Tile(
final_encoder_hidden_states[i],
'final_encoder_hidden_tiled_{}'.format(i),
tiles=self.beam_size,
axis=1,
)
if final_encoder_cell_states is not None:
for i in range(len(final_encoder_cell_states)):
if final_encoder_cell_states[i] is not None:
final_encoder_cell_states[i] = model.net.Tile(
final_encoder_cell_states[i],
'final_encoder_cell_tiled_{}'.format(i),
tiles=self.beam_size,
axis=1,
)
initial_states = \
seq2seq_util.build_initial_rnn_decoder_states(
model=model,
encoder_units_per_layer=encoder_units_per_layer,
decoder_units_per_layer=decoder_units_per_layer,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
use_attention=use_attention,
)
attention_decoder = seq2seq_util.LSTMWithAttentionDecoder(
encoder_outputs=encoder_outputs,
encoder_output_dim=encoder_units_per_layer[-1],
encoder_lengths=None,
vocab_size=self.target_vocab_size,
attention_type=attention_type,
embedding_size=model_params['decoder_embedding_size'],
decoder_num_units=decoder_units_per_layer[-1],
decoder_cells=decoder_cells,
weighted_encoder_outputs=weighted_encoder_outputs,
name=scope,
)
states_prev = step_model.net.AddExternalInputs(*[
'{}/{}_prev'.format(scope, s)
for s in attention_decoder.get_state_names()
])
decoder_outputs, states = attention_decoder.apply(
model=step_model,
input_t=embedded_tokens_t_prev,
seq_lengths=fake_seq_lengths,
states=states_prev,
timestep=timestep,
)
state_configs = [
BeamSearchForwardOnly.StateConfig(
initial_value=initial_state,
state_prev_link=BeamSearchForwardOnly.LinkConfig(
blob=state_prev,
offset=0,
window=1,
),
state_link=BeamSearchForwardOnly.LinkConfig(
blob=state,
offset=1,
window=1,
),
)
for initial_state, state_prev, state in zip(
initial_states,
states_prev,
states,
)
]
with core.NameScope(scope):
decoder_outputs_flattened, _ = step_model.net.Reshape(
[decoder_outputs],
[
'decoder_outputs_flattened',
'decoder_outputs_and_contexts_combination_old_shape',
],
shape=[-1, attention_decoder.get_output_dim()],
)
output_logits = seq2seq_util.output_projection(
model=step_model,
decoder_outputs=decoder_outputs_flattened,
decoder_output_size=attention_decoder.get_output_dim(),
target_vocab_size=self.target_vocab_size,
decoder_softmax_size=model_params['decoder_softmax_size'],
)
# [1, beam_size, target_vocab_size]
output_probs = step_model.net.Softmax(
output_logits,
'output_probs',
)
output_log_probs = step_model.net.Log(
output_probs,
'output_log_probs',
)
if use_attention:
attention_weights = attention_decoder.get_attention_weights()
else:
attention_weights = step_model.net.ConstantFill(
[self.encoder_inputs],
'zero_attention_weights_tmp_1',
value=0.0,
)
attention_weights = step_model.net.Transpose(
attention_weights,
'zero_attention_weights_tmp_2',
)
attention_weights = step_model.net.Tile(
attention_weights,
'zero_attention_weights_tmp',
tiles=self.beam_size,
axis=0,
)
return (
state_configs,
output_log_probs,
attention_weights,
)
def __init__(
self,
translate_params,
):
self.models = translate_params['ensemble_models']
decoding_params = translate_params['decoding_params']
self.beam_size = decoding_params['beam_size']
assert len(self.models) > 0
source_vocab = self.models[0]['source_vocab']
target_vocab = self.models[0]['target_vocab']
for model in self.models:
assert model['source_vocab'] == source_vocab
assert model['target_vocab'] == target_vocab
self.source_vocab_size = len(source_vocab)
self.target_vocab_size = len(target_vocab)
self.decoder_scope_names = [
'model{}'.format(i) for i in range(len(self.models))
]
self.model = Seq2SeqModelHelper(init_params=True)
self.encoder_inputs = self.model.net.AddExternalInput('encoder_inputs')
self.encoder_lengths = self.model.net.AddExternalInput(
'encoder_lengths'
)
self.max_output_seq_len = self.model.net.AddExternalInput(
'max_output_seq_len'
)
fake_seq_lengths = self.model.param_init_net.ConstantFill(
[],
'fake_seq_lengths',
shape=[self.beam_size],
value=100000,
dtype=core.DataType.INT32,
)
beam_decoder = BeamSearchForwardOnly(
beam_size=self.beam_size,
model=self.model,
go_token_id=seq2seq_util.GO_ID,
eos_token_id=seq2seq_util.EOS_ID,
)
step_model = beam_decoder.get_step_model()
state_configs = []
output_log_probs = []
attention_weights = []
for model, scope_name in zip(
self.models,
self.decoder_scope_names,
):
(
state_configs_per_decoder,
output_log_probs_per_decoder,
attention_weights_per_decoder,
) = self._build_decoder(
model=self.model,
step_model=step_model,
model_params=model['model_params'],
scope=scope_name,
previous_tokens=beam_decoder.get_previous_tokens(),
timestep=beam_decoder.get_timestep(),
fake_seq_lengths=fake_seq_lengths,
)
state_configs.extend(state_configs_per_decoder)
output_log_probs.append(output_log_probs_per_decoder)
if attention_weights_per_decoder is not None:
attention_weights.append(attention_weights_per_decoder)
assert len(attention_weights) > 0
num_decoders_with_attention_blob = (
self.model.param_init_net.ConstantFill(
[],
'num_decoders_with_attention_blob',
value=1 / float(len(attention_weights)),
shape=[1],
)
)
# [beam_size, encoder_length, 1]
attention_weights_average = _weighted_sum(
model=step_model,
values=attention_weights,
weight=num_decoders_with_attention_blob,
output_name='attention_weights_average',
)
num_decoders_blob = self.model.param_init_net.ConstantFill(
[],
'num_decoders_blob',
value=1 / float(len(output_log_probs)),
shape=[1],
)
# [beam_size, target_vocab_size]
output_log_probs_average = _weighted_sum(
model=step_model,
values=output_log_probs,
weight=num_decoders_blob,
output_name='output_log_probs_average',
)
word_rewards = self.model.param_init_net.ConstantFill(
[],
'word_rewards',
shape=[self.target_vocab_size],
value=0.0,
dtype=core.DataType.FLOAT,
)
(
self.output_token_beam_list,
self.output_prev_index_beam_list,
self.output_score_beam_list,
self.output_attention_weights_beam_list,
) = beam_decoder.apply(
inputs=self.encoder_inputs,
length=self.max_output_seq_len,
log_probs=output_log_probs_average,
attentions=attention_weights_average,
state_configs=state_configs,
data_dependencies=[],
word_rewards=word_rewards,
)
workspace.RunNetOnce(self.model.param_init_net)
workspace.FeedBlob(
'word_rewards',
self.build_word_rewards(
vocab_size=self.target_vocab_size,
word_reward=translate_params['decoding_params']['word_reward'],
unk_reward=translate_params['decoding_params']['unk_reward'],
)
)
workspace.CreateNet(
self.model.net,
input_blobs=[
str(self.encoder_inputs),
str(self.encoder_lengths),
str(self.max_output_seq_len),
],
)
logger.info('Params created: ')
for param in self.model.params:
logger.info(param)
def decode(self, numberized_input, max_output_seq_len):
workspace.FeedBlob(
self.encoder_inputs,
np.array([
[token_id] for token_id in reversed(numberized_input)
]).astype(dtype=np.int32),
)
workspace.FeedBlob(
self.encoder_lengths,
np.array([len(numberized_input)]).astype(dtype=np.int32),
)
workspace.FeedBlob(
self.max_output_seq_len,
np.array([max_output_seq_len]).astype(dtype=np.int64),
)
workspace.RunNet(self.model.net)
num_steps = max_output_seq_len
score_beam_list = workspace.FetchBlob(self.output_score_beam_list)
token_beam_list = (
workspace.FetchBlob(self.output_token_beam_list)
)
prev_index_beam_list = (
workspace.FetchBlob(self.output_prev_index_beam_list)
)
attention_weights_beam_list = (
workspace.FetchBlob(self.output_attention_weights_beam_list)
)
best_indices = (num_steps, 0)
for i in range(num_steps + 1):
for hyp_index in range(self.beam_size):
if (
(
token_beam_list[i][hyp_index][0] ==
seq2seq_util.EOS_ID or
i == num_steps
) and
(
score_beam_list[i][hyp_index][0] >
score_beam_list[best_indices[0]][best_indices[1]][0]
)
):
best_indices = (i, hyp_index)
i, hyp_index = best_indices
output = []
attention_weights_per_token = []
best_score = -score_beam_list[i][hyp_index][0]
while i > 0:
output.append(token_beam_list[i][hyp_index][0])
attention_weights_per_token.append(
attention_weights_beam_list[i][hyp_index]
)
hyp_index = prev_index_beam_list[i][hyp_index][0]
i -= 1
attention_weights_per_token = reversed(attention_weights_per_token)
# encoder_inputs are reversed, see get_batch func
attention_weights_per_token = [
list(reversed(attention_weights))[:len(numberized_input)]
for attention_weights in attention_weights_per_token
]
output = list(reversed(output))
return output, attention_weights_per_token, best_score
def run_seq2seq_beam_decoder(args, model_params, decoding_params):
source_vocab = seq2seq_util.gen_vocab(
args.source_corpus,
args.unk_threshold,
)
logger.info('Source vocab size {}'.format(len(source_vocab)))
target_vocab = seq2seq_util.gen_vocab(
args.target_corpus,
args.unk_threshold,
)
inversed_target_vocab = {v: k for (k, v) in viewitems(target_vocab)}
logger.info('Target vocab size {}'.format(len(target_vocab)))
decoder = Seq2SeqModelCaffe2EnsembleDecoder(
translate_params=dict(
ensemble_models=[dict(
source_vocab=source_vocab,
target_vocab=target_vocab,
model_params=model_params,
model_file=args.checkpoint,
)],
decoding_params=decoding_params,
),
)
decoder.load_models()
for line in sys.stdin:
numerized_source_sentence = seq2seq_util.get_numberized_sentence(
line,
source_vocab,
)
translation, alignment, _ = decoder.decode(
numerized_source_sentence,
2 * len(numerized_source_sentence) + 5,
)
print(' '.join([inversed_target_vocab[tid] for tid in translation]))
def main():
parser = argparse.ArgumentParser(
description='Caffe2: Seq2Seq Translation',
)
parser.add_argument('--source-corpus', type=str, default=None,
help='Path to source corpus in a text file format. Each '
'line in the file should contain a single sentence',
required=True)
parser.add_argument('--target-corpus', type=str, default=None,
help='Path to target corpus in a text file format',
required=True)
parser.add_argument('--unk-threshold', type=int, default=50,
help='Threshold frequency under which token becomes '
'labeled unknown token')
parser.add_argument('--use-bidirectional-encoder', action='store_true',
help='Set flag to use bidirectional recurrent network '
'in encoder')
parser.add_argument('--use-attention', action='store_true',
help='Set flag to use seq2seq with attention model')
parser.add_argument('--encoder-cell-num-units', type=int, default=512,
help='Number of cell units per encoder layer')
parser.add_argument('--encoder-num-layers', type=int, default=2,
                        help='Number of encoder layers')
parser.add_argument('--decoder-cell-num-units', type=int, default=512,
help='Number of cell units in the decoder layer')
parser.add_argument('--decoder-num-layers', type=int, default=2,
                        help='Number of decoder layers')
parser.add_argument('--encoder-embedding-size', type=int, default=256,
help='Size of embedding in the encoder layer')
parser.add_argument('--decoder-embedding-size', type=int, default=512,
help='Size of embedding in the decoder layer')
parser.add_argument('--decoder-softmax-size', type=int, default=None,
help='Size of softmax layer in the decoder')
parser.add_argument('--beam-size', type=int, default=6,
help='Size of beam for the decoder')
parser.add_argument('--word-reward', type=float, default=0.0,
                        help='Reward for each generated word.')
parser.add_argument('--unk-reward', type=float, default=0.0,
                        help='Reward for each generated UNK token. '
'Typically should be negative.')
parser.add_argument('--checkpoint', type=str, default=None,
help='Path to checkpoint', required=True)
args = parser.parse_args()
encoder_layer_configs = [
dict(
num_units=args.encoder_cell_num_units,
),
] * args.encoder_num_layers
if args.use_bidirectional_encoder:
assert args.encoder_cell_num_units % 2 == 0
encoder_layer_configs[0]['num_units'] /= 2
decoder_layer_configs = [
dict(
num_units=args.decoder_cell_num_units,
),
] * args.decoder_num_layers
run_seq2seq_beam_decoder(
args,
model_params=dict(
attention=('regular' if args.use_attention else 'none'),
decoder_layer_configs=decoder_layer_configs,
encoder_type=dict(
encoder_layer_configs=encoder_layer_configs,
use_bidirectional_encoder=args.use_bidirectional_encoder,
),
encoder_embedding_size=args.encoder_embedding_size,
decoder_embedding_size=args.decoder_embedding_size,
decoder_softmax_size=args.decoder_softmax_size,
),
decoding_params=dict(
beam_size=args.beam_size,
word_reward=args.word_reward,
unk_reward=args.unk_reward,
),
)
if __name__ == '__main__':
main()
|
pytorch-master
|
caffe2/python/models/seq2seq/translate.py
|
import numpy as np
import os
import tempfile
from caffe2.python import test_util, workspace
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
from caffe2.python.models.seq2seq.train import Seq2SeqModelCaffe2
from caffe2.python.models.seq2seq.translate import (
Seq2SeqModelCaffe2EnsembleDecoder,
)
class Seq2SeqBeamSearchTest(test_util.TestCase):
def _build_seq2seq_model(
self,
model_params,
tmp_dir,
source_vocab_size=20,
target_vocab_size=20,
num_gpus=0,
batch_size=2,
):
training_params = dict(
model_params,
batch_size=batch_size,
optimizer_params=dict(
learning_rate=0.1,
),
max_gradient_norm=1.0,
)
model_obj = Seq2SeqModelCaffe2(
training_params,
source_vocab_size,
target_vocab_size,
num_gpus,
)
model_obj.initialize_from_scratch()
checkpoint_path_prefix = os.path.join(tmp_dir, 'checkpoint')
checkpoint_path = model_obj.save(
checkpoint_path_prefix=checkpoint_path_prefix,
current_step=0,
)
return model_obj, checkpoint_path
def _run_compare_train_inference(self, model_params):
tmp_dir = tempfile.mkdtemp()
model_obj, checkpoint_path = self._build_seq2seq_model(
model_params,
tmp_dir=tmp_dir,
source_vocab_size=20,
target_vocab_size=20,
num_gpus=0,
batch_size=2,
)
assert model_obj is not None
translate_params = dict(
ensemble_models=[dict(
source_vocab={i: str(i) for i in range(20)},
target_vocab={i: str(i) for i in range(20)},
model_params=model_params,
model_file=checkpoint_path,
)],
decoding_params=dict(
beam_size=3,
word_reward=0,
unk_reward=0,
),
)
beam_decoder_model = Seq2SeqModelCaffe2EnsembleDecoder(translate_params)
beam_decoder_model.load_models()
encoder_lengths = 5
decoder_lengths = 7
for _ in range(3):
            encoder_inputs = np.random.randint(
                low=3,  # after GO_ID (1) and EOS_ID (2)
                high=20,  # exclusive bound; same range as the old inclusive high=19
                size=encoder_lengths,
            )
targets, _, beam_model_score = beam_decoder_model.decode(
encoder_inputs,
decoder_lengths,
)
targets_2, _, beam_model_score = beam_decoder_model.decode(
encoder_inputs,
decoder_lengths,
)
self.assertEqual(targets, targets_2)
workspace.FeedBlob(
'encoder_inputs',
np.array(
[list(reversed(encoder_inputs))]
).transpose().astype(dtype=np.int32))
workspace.FeedBlob(
'encoder_lengths',
np.array([len(encoder_inputs)]).astype(dtype=np.int32),
)
decoder_inputs = [seq2seq_util.GO_ID] + targets[:-1]
workspace.FeedBlob(
'decoder_inputs',
np.array([decoder_inputs]).transpose().astype(dtype=np.int32),
)
workspace.FeedBlob(
'decoder_lengths',
np.array([len(decoder_inputs)]).astype(dtype=np.int32),
)
workspace.FeedBlob(
'targets',
np.array([targets]).transpose().astype(dtype=np.int32),
)
workspace.FeedBlob(
'target_weights',
np.array([[1.0] * len(targets)]).astype(dtype=np.float32),
)
workspace.RunNet(model_obj.forward_net)
train_model_score = workspace.FetchBlob('total_loss_scalar')
np.testing.assert_almost_equal(
beam_model_score,
train_model_score,
decimal=4,
)
def test_attention(self):
model_params = dict(
attention='regular',
decoder_layer_configs=[
dict(
num_units=32,
),
],
encoder_type=dict(
encoder_layer_configs=[
dict(
num_units=16,
),
],
use_bidirectional_encoder=True,
),
encoder_embedding_size=8,
decoder_embedding_size=8,
decoder_softmax_size=None,
)
self._run_compare_train_inference(model_params)
def test_2layer_attention(self):
model_params = dict(
attention='regular',
decoder_layer_configs=[
dict(
num_units=32,
),
dict(
num_units=32,
),
],
encoder_type=dict(
encoder_layer_configs=[
dict(
num_units=16,
),
dict(
num_units=32,
),
],
use_bidirectional_encoder=True,
),
encoder_embedding_size=8,
decoder_embedding_size=8,
decoder_softmax_size=None,
)
self._run_compare_train_inference(model_params)
def test_multi_decoder(self):
model_params = dict(
attention='regular',
decoder_layer_configs=[
dict(
num_units=32,
),
dict(
num_units=32,
),
dict(
num_units=32,
),
],
encoder_type=dict(
encoder_layer_configs=[
dict(
num_units=32,
),
],
use_bidirectional_encoder=False,
),
encoder_embedding_size=8,
decoder_embedding_size=8,
decoder_softmax_size=None,
)
self._run_compare_train_inference(model_params)
|
pytorch-master
|
caffe2/python/models/seq2seq/seq2seq_beam_search_test.py
|
## @package seq2seq_model_helper
# Module caffe2.python.models.seq2seq.seq2seq_model_helper
from caffe2.python import scope
from caffe2.python.model_helper import ModelHelper
class Seq2SeqModelHelper(ModelHelper):
def __init__(self, init_params=True, **kwargs):
arg_scope = {
'use_cudnn': kwargs.pop('use_cudnn', True),
'cudnn_exhaustive_search': kwargs.pop('cudnn_exhaustive_search', False),
'order': 'NHWC',
}
if kwargs.get('ws_nbytes_limit', None):
arg_scope['ws_nbytes_limit'] = kwargs.pop('ws_nbytes_limit')
super(Seq2SeqModelHelper, self).__init__(
init_params=init_params,
arg_scope=arg_scope,
**kwargs
)
self.non_trainable_params = []
def AddParam(self, name, init=None, init_value=None, trainable=True):
"""Adds a parameter to the model's net and it's initializer if needed
Args:
init: a tuple (<initialization_op_name>, <initialization_op_kwargs>)
init_value: int, float or str. Can be used instead of `init` as a
simple constant initializer
trainable: bool, whether to compute gradient for this param or not
"""
if init_value is not None:
assert init is None
assert type(init_value) in [int, float, str]
init = ('ConstantFill', dict(
shape=[1],
value=init_value,
))
if self.init_params:
param = self.param_init_net.__getattr__(init[0])(
[],
name,
**init[1]
)
else:
param = self.net.AddExternalInput(name)
if trainable:
self.params.append(param)
else:
self.non_trainable_params.append(param)
return param
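    def _add_param_usage_sketch(self):
        # Illustrative sketch only (hedged, not used anywhere else in this
        # module): shows the two initialization styles documented in
        # AddParam above. The parameter names 'demo_lr' and 'demo_w' are
        # hypothetical placeholders.
        lr = self.AddParam('demo_lr', init_value=0.1, trainable=False)
        w = self.AddParam(
            'demo_w',
            init=('GaussianFill', dict(shape=[4, 4], std=0.01)),
        )
        return lr, w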
def GetNonTrainableParams(self, namescope=None):
'''
Returns the params in current namescope
'''
if namescope is None:
namescope = scope.CurrentNameScope()
else:
if not namescope.endswith(scope._NAMESCOPE_SEPARATOR):
namescope += scope._NAMESCOPE_SEPARATOR
if namescope == '':
return self.non_trainable_params[:]
else:
return [
p for p in self.non_trainable_params
if p.GetNameScope() == namescope
]
def GetAllParams(self, namescope=None):
return (
self.GetParams(namescope) +
self.GetComputedParams(namescope) +
self.GetNonTrainableParams(namescope)
)
|
pytorch-master
|
caffe2/python/models/seq2seq/seq2seq_model_helper.py
|
## @package seq2seq_util
# Module caffe2.python.models.seq2seq.seq2seq_util
""" A bunch of util functions to build Seq2Seq models with Caffe2."""
import collections
from future.utils import viewitems
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import attention, core, rnn_cell, brew
PAD_ID = 0
PAD = '<PAD>'
GO_ID = 1
GO = '<GO>'
EOS_ID = 2
EOS = '<EOS>'
UNK_ID = 3
UNK = '<UNK>'
def gen_vocab(corpus, unk_threshold):
vocab = collections.defaultdict(lambda: len(vocab))
freqs = collections.defaultdict(lambda: 0)
    # Add the special tokens first so their ids line up with PAD_ID, GO_ID, EOS_ID and UNK_ID
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
for sentence in f:
tokens = sentence.strip().split()
for token in tokens:
freqs[token] += 1
for token, freq in viewitems(freqs):
if freq > unk_threshold:
vocab[token]
return vocab
def get_numberized_sentence(sentence, vocab):
numerized_sentence = []
for token in sentence.strip().split():
if token in vocab:
numerized_sentence.append(vocab[token])
else:
numerized_sentence.append(vocab[UNK])
return numerized_sentence
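def _vocab_numberize_demo():
    # Illustrative sketch only (hedged, not part of the original API): builds
    # a vocab from a tiny throwaway corpus and numberizes a sentence. Tokens
    # whose frequency is <= unk_threshold fall back to UNK_ID; the special
    # tokens keep the fixed ids PAD_ID=0, GO_ID=1, EOS_ID=2, UNK_ID=3.
    import os
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('the cat sat\nthe dog sat\n')
        corpus_path = f.name
    try:
        vocab = gen_vocab(corpus_path, unk_threshold=1)
        # 'the' and 'sat' occur twice (> threshold); 'cat' maps to UNK_ID
        return get_numberized_sentence('the cat sat', vocab)
    finally:
        os.remove(corpus_path)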
def rnn_unidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=None,
):
""" Unidirectional LSTM encoder."""
with core.NameScope(scope):
initial_cell_state = model.param_init_net.ConstantFill(
[],
'initial_cell_state',
shape=[num_units],
value=0.0,
)
initial_hidden_state = model.param_init_net.ConstantFill(
[],
'initial_hidden_state',
shape=[num_units],
value=0.0,
)
cell = rnn_cell.LSTMCell(
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
name=(scope + '/' if scope else '') + 'lstm',
forward_only=forward_only,
)
dropout_ratio = (
None if dropout_keep_prob is None else (1.0 - dropout_keep_prob)
)
if dropout_ratio is not None:
cell = rnn_cell.DropoutCell(
internal_cell=cell,
dropout_ratio=dropout_ratio,
name=(scope + '/' if scope else '') + 'dropout',
forward_only=forward_only,
is_test=False,
)
outputs_with_grads = []
if return_sequence_output:
outputs_with_grads.append(0)
if return_final_state:
outputs_with_grads.extend([1, 3])
outputs, (_, final_hidden_state, _, final_cell_state) = (
cell.apply_over_sequence(
model=model,
inputs=inputs,
seq_lengths=input_lengths,
initial_states=(initial_hidden_state, initial_cell_state),
outputs_with_grads=outputs_with_grads,
)
)
return outputs, final_hidden_state, final_cell_state
def rnn_bidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=None,
):
outputs_fw, final_hidden_fw, final_cell_fw = rnn_unidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=(scope + '/' if scope else '') + 'fw',
)
with core.NameScope(scope):
reversed_inputs = model.net.ReversePackedSegs(
[inputs, input_lengths],
['reversed_inputs'],
)
outputs_bw, final_hidden_bw, final_cell_bw = rnn_unidirectional_layer(
model,
reversed_inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=(scope + '/' if scope else '') + 'bw',
)
with core.NameScope(scope):
outputs_bw = model.net.ReversePackedSegs(
[outputs_bw, input_lengths],
['outputs_bw'],
)
# Concatenate forward and backward results
if return_sequence_output:
with core.NameScope(scope):
outputs, _ = model.net.Concat(
[outputs_fw, outputs_bw],
['outputs', 'outputs_dim'],
axis=2,
)
else:
outputs = None
if return_final_state:
with core.NameScope(scope):
final_hidden_state, _ = model.net.Concat(
[final_hidden_fw, final_hidden_bw],
['final_hidden_state', 'final_hidden_state_dim'],
axis=2,
)
final_cell_state, _ = model.net.Concat(
[final_cell_fw, final_cell_bw],
['final_cell_state', 'final_cell_state_dim'],
axis=2,
)
else:
final_hidden_state = None
final_cell_state = None
return outputs, final_hidden_state, final_cell_state
def build_embeddings(
model,
vocab_size,
embedding_size,
name,
freeze_embeddings,
):
embeddings = model.param_init_net.GaussianFill(
[],
name,
shape=[vocab_size, embedding_size],
std=0.1,
)
if not freeze_embeddings:
model.params.append(embeddings)
return embeddings
def get_layer_scope(scope, layer_type, i):
prefix = (scope + '/' if scope else '') + layer_type
return '{}/layer{}'.format(prefix, i)
def build_embedding_encoder(
model,
encoder_params,
num_decoder_layers,
inputs,
input_lengths,
vocab_size,
embeddings,
embedding_size,
use_attention,
num_gpus=0,
forward_only=False,
scope=None,
):
with core.NameScope(scope or ''):
if num_gpus == 0:
embedded_encoder_inputs = model.net.Gather(
[embeddings, inputs],
['embedded_encoder_inputs'],
)
else:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
embedded_encoder_inputs_cpu = model.net.Gather(
[embeddings, inputs],
['embedded_encoder_inputs_cpu'],
)
embedded_encoder_inputs = model.CopyCPUToGPU(
embedded_encoder_inputs_cpu,
'embedded_encoder_inputs',
)
layer_inputs = embedded_encoder_inputs
layer_input_size = embedding_size
encoder_units_per_layer = []
final_encoder_hidden_states = []
final_encoder_cell_states = []
num_encoder_layers = len(encoder_params['encoder_layer_configs'])
use_bidirectional_encoder = encoder_params.get(
'use_bidirectional_encoder',
False,
)
for i, layer_config in enumerate(encoder_params['encoder_layer_configs']):
if use_bidirectional_encoder and i == 0:
layer_func = rnn_bidirectional_layer
output_dims = 2 * layer_config['num_units']
else:
layer_func = rnn_unidirectional_layer
output_dims = layer_config['num_units']
encoder_units_per_layer.append(output_dims)
is_final_layer = (i == num_encoder_layers - 1)
dropout_keep_prob = layer_config.get(
'dropout_keep_prob',
None,
)
return_final_state = i >= (num_encoder_layers - num_decoder_layers)
(
layer_outputs,
final_layer_hidden_state,
final_layer_cell_state,
) = layer_func(
model=model,
inputs=layer_inputs,
input_lengths=input_lengths,
input_size=layer_input_size,
num_units=layer_config['num_units'],
dropout_keep_prob=dropout_keep_prob,
forward_only=forward_only,
return_sequence_output=(not is_final_layer) or use_attention,
return_final_state=return_final_state,
scope=get_layer_scope(scope, 'encoder', i),
)
if not is_final_layer:
layer_inputs = layer_outputs
layer_input_size = output_dims
final_encoder_hidden_states.append(final_layer_hidden_state)
final_encoder_cell_states.append(final_layer_cell_state)
encoder_outputs = layer_outputs
weighted_encoder_outputs = None
return (
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
)
class LSTMWithAttentionDecoder(object):
def scope(self, name):
return self.name + '/' + name if self.name is not None else name
def _get_attention_type(self, attention_type_as_string):
if attention_type_as_string == 'regular':
return attention.AttentionType.Regular
elif attention_type_as_string == 'recurrent':
return attention.AttentionType.Recurrent
else:
assert False, 'Unknown type ' + attention_type_as_string
def __init__(
self,
encoder_outputs,
encoder_output_dim,
encoder_lengths,
vocab_size,
attention_type,
embedding_size,
decoder_num_units,
decoder_cells,
residual_output_layers=None,
name=None,
weighted_encoder_outputs=None,
):
self.name = name
self.num_layers = len(decoder_cells)
if attention_type == 'none':
self.cell = rnn_cell.MultiRNNCell(
decoder_cells,
name=self.scope('decoder'),
residual_output_layers=residual_output_layers,
)
self.use_attention = False
self.decoder_output_dim = decoder_num_units
self.output_indices = self.cell.output_indices
else:
decoder_cell = rnn_cell.MultiRNNCell(
decoder_cells,
name=self.scope('decoder'),
residual_output_layers=residual_output_layers,
)
self.cell = rnn_cell.AttentionCell(
encoder_output_dim=encoder_output_dim,
encoder_outputs=encoder_outputs,
encoder_lengths=encoder_lengths,
decoder_cell=decoder_cell,
decoder_state_dim=decoder_num_units,
name=self.scope('attention_decoder'),
attention_type=self._get_attention_type(attention_type),
weighted_encoder_outputs=weighted_encoder_outputs,
attention_memory_optimization=True,
)
self.use_attention = True
self.decoder_output_dim = decoder_num_units + encoder_output_dim
self.output_indices = decoder_cell.output_indices
self.output_indices.append(2 * self.num_layers)
def get_state_names(self):
return self.cell.get_state_names()
def get_outputs_with_grads(self):
# sequence (all) output locations are at twice their state index
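        # e.g. output_indices == [1, 3] gives outputs_with_grads == [2, 6]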
return [2 * i for i in self.output_indices]
def get_output_dim(self):
return self.decoder_output_dim
def get_attention_weights(self):
assert self.use_attention
# [batch_size, encoder_length, 1]
return self.cell.get_attention_weights()
def apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
):
return self.cell.apply(
model=model,
input_t=input_t,
seq_lengths=seq_lengths,
states=states,
timestep=timestep,
)
def apply_over_sequence(
self,
model,
inputs,
seq_lengths,
initial_states,
):
return self.cell.apply_over_sequence(
model=model,
inputs=inputs,
seq_lengths=seq_lengths,
initial_states=initial_states,
outputs_with_grads=self.get_outputs_with_grads(),
)
def build_initial_rnn_decoder_states(
model,
encoder_units_per_layer,
decoder_units_per_layer,
final_encoder_hidden_states,
final_encoder_cell_states,
use_attention,
):
num_encoder_layers = len(encoder_units_per_layer)
num_decoder_layers = len(decoder_units_per_layer)
if num_encoder_layers > num_decoder_layers:
offset = num_encoder_layers - num_decoder_layers
else:
offset = 0
initial_states = []
for i, decoder_num_units in enumerate(decoder_units_per_layer):
if (
final_encoder_hidden_states and
len(final_encoder_hidden_states) > (i + offset)
):
final_encoder_hidden_state = final_encoder_hidden_states[i + offset]
else:
final_encoder_hidden_state = None
if final_encoder_hidden_state is None:
decoder_initial_hidden_state = model.param_init_net.ConstantFill(
[],
'decoder_initial_hidden_state_{}'.format(i),
shape=[decoder_num_units],
value=0.0,
)
model.params.append(decoder_initial_hidden_state)
elif decoder_num_units != encoder_units_per_layer[i + offset]:
decoder_initial_hidden_state = brew.fc(
model,
final_encoder_hidden_state,
'decoder_initial_hidden_state_{}'.format(i),
encoder_units_per_layer[i + offset],
decoder_num_units,
axis=2,
)
else:
decoder_initial_hidden_state = final_encoder_hidden_state
initial_states.append(decoder_initial_hidden_state)
if (
final_encoder_cell_states and
len(final_encoder_cell_states) > (i + offset)
):
final_encoder_cell_state = final_encoder_cell_states[i + offset]
else:
final_encoder_cell_state = None
if final_encoder_cell_state is None:
decoder_initial_cell_state = model.param_init_net.ConstantFill(
[],
'decoder_initial_cell_state_{}'.format(i),
shape=[decoder_num_units],
value=0.0,
)
model.params.append(decoder_initial_cell_state)
elif decoder_num_units != encoder_units_per_layer[i + offset]:
decoder_initial_cell_state = brew.fc(
model,
final_encoder_cell_state,
'decoder_initial_cell_state_{}'.format(i),
encoder_units_per_layer[i + offset],
decoder_num_units,
axis=2,
)
else:
decoder_initial_cell_state = final_encoder_cell_state
initial_states.append(decoder_initial_cell_state)
if use_attention:
initial_attention_weighted_encoder_context = (
model.param_init_net.ConstantFill(
[],
'initial_attention_weighted_encoder_context',
shape=[encoder_units_per_layer[-1]],
value=0.0,
)
)
model.params.append(initial_attention_weighted_encoder_context)
initial_states.append(initial_attention_weighted_encoder_context)
return initial_states
def build_embedding_decoder(
model,
decoder_layer_configs,
inputs,
input_lengths,
encoder_lengths,
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
vocab_size,
embeddings,
embedding_size,
attention_type,
forward_only,
num_gpus=0,
scope=None,
):
with core.NameScope(scope or ''):
if num_gpus == 0:
embedded_decoder_inputs = model.net.Gather(
[embeddings, inputs],
['embedded_decoder_inputs'],
)
else:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
embedded_decoder_inputs_cpu = model.net.Gather(
[embeddings, inputs],
['embedded_decoder_inputs_cpu'],
)
embedded_decoder_inputs = model.CopyCPUToGPU(
embedded_decoder_inputs_cpu,
'embedded_decoder_inputs',
)
decoder_cells = []
decoder_units_per_layer = []
for i, layer_config in enumerate(decoder_layer_configs):
num_units = layer_config['num_units']
decoder_units_per_layer.append(num_units)
if i == 0:
input_size = embedding_size
else:
input_size = decoder_cells[-1].get_output_dim()
cell = rnn_cell.LSTMCell(
forward_only=forward_only,
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
)
dropout_keep_prob = layer_config.get('dropout_keep_prob', None)
if dropout_keep_prob is not None:
                dropout_ratio = 1.0 - dropout_keep_prob
cell = rnn_cell.DropoutCell(
internal_cell=cell,
dropout_ratio=dropout_ratio,
forward_only=forward_only,
is_test=False,
name=get_layer_scope(scope, 'decoder_dropout', i),
)
decoder_cells.append(cell)
states = build_initial_rnn_decoder_states(
model=model,
encoder_units_per_layer=encoder_units_per_layer,
decoder_units_per_layer=decoder_units_per_layer,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
use_attention=(attention_type != 'none'),
)
attention_decoder = LSTMWithAttentionDecoder(
encoder_outputs=encoder_outputs,
encoder_output_dim=encoder_units_per_layer[-1],
encoder_lengths=encoder_lengths,
vocab_size=vocab_size,
attention_type=attention_type,
embedding_size=embedding_size,
decoder_num_units=decoder_units_per_layer[-1],
decoder_cells=decoder_cells,
weighted_encoder_outputs=weighted_encoder_outputs,
name=scope,
)
decoder_outputs, _ = attention_decoder.apply_over_sequence(
model=model,
inputs=embedded_decoder_inputs,
seq_lengths=input_lengths,
initial_states=states,
)
# we do softmax over the whole sequence
# (max_length in the batch * batch_size) x decoder embedding size
# -1 because we don't know max_length yet
decoder_outputs_flattened, _ = model.net.Reshape(
[decoder_outputs],
[
'decoder_outputs_flattened',
'decoder_outputs_and_contexts_combination_old_shape',
],
shape=[-1, attention_decoder.get_output_dim()],
)
decoder_outputs = decoder_outputs_flattened
decoder_output_dim = attention_decoder.get_output_dim()
return (decoder_outputs, decoder_output_dim)
def output_projection(
model,
decoder_outputs,
decoder_output_size,
target_vocab_size,
decoder_softmax_size,
):
if decoder_softmax_size is not None:
decoder_outputs = brew.fc(
model,
decoder_outputs,
'decoder_outputs_scaled',
dim_in=decoder_output_size,
dim_out=decoder_softmax_size,
)
decoder_output_size = decoder_softmax_size
output_projection_w = model.param_init_net.XavierFill(
[],
'output_projection_w',
shape=[target_vocab_size, decoder_output_size],
)
output_projection_b = model.param_init_net.XavierFill(
[],
'output_projection_b',
shape=[target_vocab_size],
)
model.params.extend([
output_projection_w,
output_projection_b,
])
output_logits = model.net.FC(
[
decoder_outputs,
output_projection_w,
output_projection_b,
],
['output_logits'],
)
return output_logits
|
pytorch-master
|
caffe2/python/models/seq2seq/seq2seq_util.py
|
pytorch-master
|
caffe2/python/models/seq2seq/__init__.py
|
|
from caffe2.python.models.seq2seq import seq2seq_model_helper
from caffe2.python import scope, test_util
class Seq2SeqModelHelperTest(test_util.TestCase):
    def testConstructor(self):
model_name = 'TestModel'
m = seq2seq_model_helper.Seq2SeqModelHelper(name=model_name)
self.assertEqual(m.name, model_name)
self.assertEqual(m.init_params, True)
self.assertEqual(m.arg_scope, {
'use_cudnn': True,
'cudnn_exhaustive_search': False,
'order': 'NHWC'
})
def testAddParam(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
param_name = 'test_param'
param = m.AddParam(param_name, init_value=1)
self.assertEqual(str(param), param_name)
def testGetNonTrainableParams(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
m.AddParam('test_param1', init_value=1, trainable=True)
p2 = m.AddParam('test_param2', init_value=2, trainable=False)
self.assertEqual(
m.GetNonTrainableParams(),
[p2]
)
with scope.NameScope('A', reset=True):
p3 = m.AddParam('test_param3', init_value=3, trainable=False)
self.assertEqual(
m.GetNonTrainableParams(),
[p3]
)
self.assertEqual(
m.GetNonTrainableParams(),
[p2, p3]
)
def testGetAllParams(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
p1 = m.AddParam('test_param1', init_value=1, trainable=True)
p2 = m.AddParam('test_param2', init_value=2, trainable=False)
self.assertEqual(
m.GetAllParams(),
[p1, p2]
)
if __name__ == "__main__":
import unittest
import random
random.seed(2221)
unittest.main()
|
pytorch-master
|
caffe2/python/models/seq2seq/seq2seq_model_helper_test.py
|
## @package beam_search
# Module caffe2.python.models.seq2seq.beam_search
from collections import namedtuple
from caffe2.python import core
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
class BeamSearchForwardOnly(object):
"""
Class generalizing forward beam search for seq2seq models.
Also provides types to specify the recurrent structure of decoding:
StateConfig:
        initial_value: blob providing the value of the state at the first step
state_prev_link: LinkConfig describing how recurrent step receives
input from global state blob in each step
state_link: LinkConfig describing how step writes (produces new state)
to global state blob in each step
LinkConfig:
blob: blob connecting global state blob to step application
offset: offset from beginning of global blob for link in time dimension
window: width of global blob to read/write in time dimension
"""
LinkConfig = namedtuple('LinkConfig', ['blob', 'offset', 'window'])
StateConfig = namedtuple(
'StateConfig',
['initial_value', 'state_prev_link', 'state_link'],
)
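    # Illustrative sketch only (hedged): how a single recurrent state is
    # typically described with the two types above. The blob names below
    # ('scores_init', 'scores_t_prev', 'scores_t') are placeholders and are
    # not referenced anywhere else in this class.
    _EXAMPLE_SCORES_STATE = StateConfig(
        initial_value='scores_init',
        state_prev_link=LinkConfig(blob='scores_t_prev', offset=0, window=1),
        state_link=LinkConfig(blob='scores_t', offset=1, window=1),
    )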
def __init__(
self,
beam_size,
model,
eos_token_id,
go_token_id=seq2seq_util.GO_ID,
post_eos_penalty=None,
):
self.beam_size = beam_size
self.model = model
self.step_model = Seq2SeqModelHelper(
name='step_model',
param_model=self.model,
)
self.go_token_id = go_token_id
self.eos_token_id = eos_token_id
self.post_eos_penalty = post_eos_penalty
(
self.timestep,
self.scores_t_prev,
self.tokens_t_prev,
self.hypo_t_prev,
self.attention_t_prev,
) = self.step_model.net.AddExternalInputs(
'timestep',
'scores_t_prev',
'tokens_t_prev',
'hypo_t_prev',
'attention_t_prev',
)
tokens_t_prev_int32 = self.step_model.net.Cast(
self.tokens_t_prev,
'tokens_t_prev_int32',
to=core.DataType.INT32,
)
self.tokens_t_prev_int32_flattened, _ = self.step_model.net.Reshape(
[tokens_t_prev_int32],
[tokens_t_prev_int32, 'input_t_int32_old_shape'],
shape=[1, -1],
)
def get_step_model(self):
return self.step_model
def get_previous_tokens(self):
return self.tokens_t_prev_int32_flattened
def get_timestep(self):
return self.timestep
# TODO: make attentions a generic state
# data_dependencies is a list of blobs that the operator should wait for
# before beginning execution. This ensures that ops are run in the correct
# order when the RecurrentNetwork op is embedded in a DAGNet, for ex.
def apply(
self,
inputs,
length,
log_probs,
attentions,
state_configs,
data_dependencies,
word_rewards=None,
possible_translation_tokens=None,
go_token_id=None,
):
ZERO = self.model.param_init_net.ConstantFill(
[],
'ZERO',
shape=[1],
value=0,
dtype=core.DataType.INT32,
)
on_initial_step = self.step_model.net.EQ(
[ZERO, self.timestep],
'on_initial_step',
)
if self.post_eos_penalty is not None:
eos_token = self.model.param_init_net.ConstantFill(
[],
'eos_token',
shape=[self.beam_size],
value=self.eos_token_id,
dtype=core.DataType.INT32,
)
finished_penalty = self.model.param_init_net.ConstantFill(
[],
'finished_penalty',
shape=[1],
value=float(self.post_eos_penalty),
dtype=core.DataType.FLOAT,
)
ZERO_FLOAT = self.model.param_init_net.ConstantFill(
[],
'ZERO_FLOAT',
shape=[1],
value=0.0,
dtype=core.DataType.FLOAT,
)
finished_penalty = self.step_model.net.Conditional(
[on_initial_step, ZERO_FLOAT, finished_penalty],
'possible_finished_penalty',
)
tokens_t_flat = self.step_model.net.FlattenToVec(
self.tokens_t_prev,
'tokens_t_flat',
)
tokens_t_flat_int = self.step_model.net.Cast(
tokens_t_flat,
'tokens_t_flat_int',
to=core.DataType.INT32,
)
predecessor_is_eos = self.step_model.net.EQ(
[tokens_t_flat_int, eos_token],
'predecessor_is_eos',
)
predecessor_is_eos_float = self.step_model.net.Cast(
predecessor_is_eos,
'predecessor_is_eos_float',
to=core.DataType.FLOAT,
)
predecessor_is_eos_penalty = self.step_model.net.Mul(
[predecessor_is_eos_float, finished_penalty],
'predecessor_is_eos_penalty',
broadcast=1,
)
log_probs = self.step_model.net.Add(
[log_probs, predecessor_is_eos_penalty],
'log_probs_penalized',
broadcast=1,
axis=0,
)
# [beam_size, beam_size]
best_scores_per_hypo, best_tokens_per_hypo = self.step_model.net.TopK(
log_probs,
['best_scores_per_hypo', 'best_tokens_per_hypo_indices'],
k=self.beam_size,
)
if possible_translation_tokens:
# [beam_size, beam_size]
best_tokens_per_hypo = self.step_model.net.Gather(
[possible_translation_tokens, best_tokens_per_hypo],
['best_tokens_per_hypo']
)
# [beam_size]
scores_t_prev_squeezed, _ = self.step_model.net.Reshape(
self.scores_t_prev,
['scores_t_prev_squeezed', 'scores_t_prev_old_shape'],
shape=[self.beam_size],
)
# [beam_size, beam_size]
output_scores = self.step_model.net.Add(
[best_scores_per_hypo, scores_t_prev_squeezed],
'output_scores',
broadcast=1,
axis=0,
)
if word_rewards is not None:
# [beam_size, beam_size]
word_rewards_for_best_tokens_per_hypo = self.step_model.net.Gather(
[word_rewards, best_tokens_per_hypo],
'word_rewards_for_best_tokens_per_hypo',
)
# [beam_size, beam_size]
output_scores = self.step_model.net.Add(
[output_scores, word_rewards_for_best_tokens_per_hypo],
'output_scores',
)
# [beam_size * beam_size]
output_scores_flattened, _ = self.step_model.net.Reshape(
[output_scores],
[output_scores, 'output_scores_old_shape'],
shape=[-1],
)
MINUS_ONE_INT32 = self.model.param_init_net.ConstantFill(
[],
'MINUS_ONE_INT32',
value=-1,
shape=[1],
dtype=core.DataType.INT32,
)
BEAM_SIZE = self.model.param_init_net.ConstantFill(
[],
'beam_size',
shape=[1],
value=self.beam_size,
dtype=core.DataType.INT32,
)
# current_beam_size (predecessor states from previous step)
# is 1 on first step (so we just need beam_size scores),
# and beam_size subsequently (so we need all beam_size * beam_size
# scores)
slice_end = self.step_model.net.Conditional(
[on_initial_step, BEAM_SIZE, MINUS_ONE_INT32],
['slice_end'],
)
# [current_beam_size * beam_size]
output_scores_flattened_slice = self.step_model.net.Slice(
[output_scores_flattened, ZERO, slice_end],
'output_scores_flattened_slice',
)
# [1, current_beam_size * beam_size]
output_scores_flattened_slice, _ = self.step_model.net.Reshape(
output_scores_flattened_slice,
[
output_scores_flattened_slice,
'output_scores_flattened_slice_old_shape',
],
shape=[1, -1],
)
# [1, beam_size]
scores_t, best_indices = self.step_model.net.TopK(
output_scores_flattened_slice,
['scores_t', 'best_indices'],
k=self.beam_size,
)
BEAM_SIZE_64 = self.model.param_init_net.Cast(
BEAM_SIZE,
'BEAM_SIZE_64',
to=core.DataType.INT64,
)
# [1, beam_size]
hypo_t_int32 = self.step_model.net.Div(
[best_indices, BEAM_SIZE_64],
'hypo_t_int32',
broadcast=1,
)
hypo_t = self.step_model.net.Cast(
hypo_t_int32,
'hypo_t',
to=core.DataType.FLOAT,
)
# [beam_size, encoder_length, 1]
attention_t = self.step_model.net.Gather(
[attentions, hypo_t_int32],
'attention_t',
)
# [1, beam_size, encoder_length]
attention_t, _ = self.step_model.net.Reshape(
attention_t,
[attention_t, 'attention_t_old_shape'],
shape=[1, self.beam_size, -1],
)
# [beam_size * beam_size]
best_tokens_per_hypo_flatten, _ = self.step_model.net.Reshape(
best_tokens_per_hypo,
[
'best_tokens_per_hypo_flatten',
'best_tokens_per_hypo_old_shape',
],
shape=[-1],
)
tokens_t_int32 = self.step_model.net.Gather(
[best_tokens_per_hypo_flatten, best_indices],
'tokens_t_int32',
)
tokens_t = self.step_model.net.Cast(
tokens_t_int32,
'tokens_t',
to=core.DataType.FLOAT,
)
def choose_state_per_hypo(state_config):
state_flattened, _ = self.step_model.net.Reshape(
state_config.state_link.blob,
[
state_config.state_link.blob,
state_config.state_link.blob + '_old_shape',
],
shape=[self.beam_size, -1],
)
state_chosen_per_hypo = self.step_model.net.Gather(
[state_flattened, hypo_t_int32],
str(state_config.state_link.blob) + '_chosen_per_hypo',
)
return self.StateConfig(
initial_value=state_config.initial_value,
state_prev_link=state_config.state_prev_link,
state_link=self.LinkConfig(
blob=state_chosen_per_hypo,
offset=state_config.state_link.offset,
window=state_config.state_link.window,
)
)
state_configs = [choose_state_per_hypo(c) for c in state_configs]
initial_scores = self.model.param_init_net.ConstantFill(
[],
'initial_scores',
shape=[1],
value=0.0,
dtype=core.DataType.FLOAT,
)
if go_token_id:
initial_tokens = self.model.net.Copy(
[go_token_id],
'initial_tokens',
)
else:
initial_tokens = self.model.param_init_net.ConstantFill(
[],
'initial_tokens',
shape=[1],
value=float(self.go_token_id),
dtype=core.DataType.FLOAT,
)
initial_hypo = self.model.param_init_net.ConstantFill(
[],
'initial_hypo',
shape=[1],
value=0.0,
dtype=core.DataType.FLOAT,
)
encoder_inputs_flattened, _ = self.model.net.Reshape(
inputs,
['encoder_inputs_flattened', 'encoder_inputs_old_shape'],
shape=[-1],
)
init_attention = self.model.net.ConstantFill(
encoder_inputs_flattened,
'init_attention',
value=0.0,
dtype=core.DataType.FLOAT,
)
state_configs = state_configs + [
self.StateConfig(
initial_value=initial_scores,
state_prev_link=self.LinkConfig(self.scores_t_prev, 0, 1),
state_link=self.LinkConfig(scores_t, 1, 1),
),
self.StateConfig(
initial_value=initial_tokens,
state_prev_link=self.LinkConfig(self.tokens_t_prev, 0, 1),
state_link=self.LinkConfig(tokens_t, 1, 1),
),
self.StateConfig(
initial_value=initial_hypo,
state_prev_link=self.LinkConfig(self.hypo_t_prev, 0, 1),
state_link=self.LinkConfig(hypo_t, 1, 1),
),
self.StateConfig(
initial_value=init_attention,
state_prev_link=self.LinkConfig(self.attention_t_prev, 0, 1),
state_link=self.LinkConfig(attention_t, 1, 1),
),
]
fake_input = self.model.net.ConstantFill(
length,
'beam_search_fake_input',
input_as_shape=True,
extra_shape=[self.beam_size, 1],
value=0.0,
dtype=core.DataType.FLOAT,
)
all_inputs = (
[fake_input] +
self.step_model.params +
[state_config.initial_value for state_config in state_configs] +
data_dependencies
)
forward_links = []
recurrent_states = []
for state_config in state_configs:
state_name = str(state_config.state_prev_link.blob) + '_states'
recurrent_states.append(state_name)
forward_links.append((
state_config.state_prev_link.blob,
state_name,
state_config.state_prev_link.offset,
state_config.state_prev_link.window,
))
forward_links.append((
state_config.state_link.blob,
state_name,
state_config.state_link.offset,
state_config.state_link.window,
))
link_internal, link_external, link_offset, link_window = (
zip(*forward_links)
)
all_outputs = [
str(s) + '_all'
for s in [scores_t, tokens_t, hypo_t, attention_t]
]
results = self.model.net.RecurrentNetwork(
all_inputs,
all_outputs + ['step_workspaces'],
param=[all_inputs.index(p) for p in self.step_model.params],
alias_src=[
str(s) + '_states'
for s in [
self.scores_t_prev,
self.tokens_t_prev,
self.hypo_t_prev,
self.attention_t_prev,
]
],
alias_dst=all_outputs,
alias_offset=[0] * 4,
recurrent_states=recurrent_states,
initial_recurrent_state_ids=[
all_inputs.index(state_config.initial_value)
for state_config in state_configs
],
link_internal=[str(l) for l in link_internal],
link_external=[str(l) for l in link_external],
link_offset=link_offset,
link_window=link_window,
backward_link_internal=[],
backward_link_external=[],
backward_link_offset=[],
step_net=self.step_model.net.Proto(),
timestep=str(self.timestep),
outputs_with_grads=[],
enable_rnn_executor=1,
rnn_executor_debug=0
)
score_t_all, tokens_t_all, hypo_t_all, attention_t_all = results[:4]
output_token_beam_list = self.model.net.Cast(
tokens_t_all,
'output_token_beam_list',
to=core.DataType.INT32,
)
output_prev_index_beam_list = self.model.net.Cast(
hypo_t_all,
'output_prev_index_beam_list',
to=core.DataType.INT32,
)
output_score_beam_list = self.model.net.Alias(
score_t_all,
'output_score_beam_list',
)
output_attention_weights_beam_list = self.model.net.Alias(
attention_t_all,
'output_attention_weights_beam_list',
)
return (
output_token_beam_list,
output_prev_index_beam_list,
output_score_beam_list,
output_attention_weights_beam_list,
)
|
pytorch-master
|
caffe2/python/models/seq2seq/beam_search.py
|
## @package train
# Module caffe2.python.models.seq2seq.train
import argparse
import collections
import logging
import math
import numpy as np
import random
import time
import sys
import os
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import core, workspace, data_parallel_model
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
Batch = collections.namedtuple('Batch', [
'encoder_inputs',
'encoder_lengths',
'decoder_inputs',
'decoder_lengths',
'targets',
'target_weights',
])
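# Descriptive note: all fields are numpy arrays. encoder_inputs,
# decoder_inputs, targets and target_weights are time-major, i.e. shaped
# [max_sequence_length, batch_size], which is why prepare_batch() transposes
# them below; encoder_lengths and decoder_lengths are 1-D of shape [batch_size].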
def prepare_batch(batch):
encoder_lengths = [len(entry[0]) for entry in batch]
max_encoder_length = max(encoder_lengths)
decoder_lengths = []
max_decoder_length = max([len(entry[1]) for entry in batch])
batch_encoder_inputs = []
batch_decoder_inputs = []
batch_targets = []
batch_target_weights = []
for source_seq, target_seq in batch:
encoder_pads = (
[seq2seq_util.PAD_ID] * (max_encoder_length - len(source_seq))
)
batch_encoder_inputs.append(
list(reversed(source_seq)) + encoder_pads
)
decoder_pads = (
[seq2seq_util.PAD_ID] * (max_decoder_length - len(target_seq))
)
target_seq_with_go_token = [seq2seq_util.GO_ID] + target_seq
decoder_lengths.append(len(target_seq_with_go_token))
batch_decoder_inputs.append(target_seq_with_go_token + decoder_pads)
target_seq_with_eos = target_seq + [seq2seq_util.EOS_ID]
targets = target_seq_with_eos + decoder_pads
batch_targets.append(targets)
if len(source_seq) + len(target_seq) == 0:
target_weights = [0] * len(targets)
else:
target_weights = [
1 if target != seq2seq_util.PAD_ID else 0
for target in targets
]
batch_target_weights.append(target_weights)
return Batch(
encoder_inputs=np.array(
batch_encoder_inputs,
dtype=np.int32,
).transpose(),
encoder_lengths=np.array(encoder_lengths, dtype=np.int32),
decoder_inputs=np.array(
batch_decoder_inputs,
dtype=np.int32,
).transpose(),
decoder_lengths=np.array(decoder_lengths, dtype=np.int32),
targets=np.array(
batch_targets,
dtype=np.int32,
).transpose(),
target_weights=np.array(
batch_target_weights,
dtype=np.float32,
).transpose(),
)
class Seq2SeqModelCaffe2(object):
def _build_model(
self,
init_params,
):
model = Seq2SeqModelHelper(init_params=init_params)
self._build_shared(model)
self._build_embeddings(model)
forward_model = Seq2SeqModelHelper(init_params=init_params)
self._build_shared(forward_model)
self._build_embeddings(forward_model)
if self.num_gpus == 0:
loss_blobs = self.model_build_fun(model)
model.AddGradientOperators(loss_blobs)
self.norm_clipped_grad_update(
model,
scope='norm_clipped_grad_update'
)
self.forward_model_build_fun(forward_model)
else:
assert (self.batch_size % self.num_gpus) == 0
data_parallel_model.Parallelize_GPU(
forward_model,
input_builder_fun=lambda m: None,
forward_pass_builder_fun=self.forward_model_build_fun,
param_update_builder_fun=None,
devices=list(range(self.num_gpus)),
)
def clipped_grad_update_bound(model):
self.norm_clipped_grad_update(
model,
scope='norm_clipped_grad_update',
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=lambda m: None,
forward_pass_builder_fun=self.model_build_fun,
param_update_builder_fun=clipped_grad_update_bound,
devices=list(range(self.num_gpus)),
)
self.norm_clipped_sparse_grad_update(
model,
scope='norm_clipped_sparse_grad_update',
)
self.model = model
self.forward_net = forward_model.net
def _build_shared(self, model):
optimizer_params = self.model_params['optimizer_params']
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
self.learning_rate = model.AddParam(
name='learning_rate',
init_value=float(optimizer_params['learning_rate']),
trainable=False,
)
self.global_step = model.AddParam(
name='global_step',
init_value=0,
trainable=False,
)
self.start_time = model.AddParam(
name='start_time',
init_value=time.time(),
trainable=False,
)
def _build_embeddings(self, model):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
sqrt3 = math.sqrt(3)
self.encoder_embeddings = model.param_init_net.UniformFill(
[],
'encoder_embeddings',
shape=[
self.source_vocab_size,
self.model_params['encoder_embedding_size'],
],
min=-sqrt3,
max=sqrt3,
)
model.params.append(self.encoder_embeddings)
self.decoder_embeddings = model.param_init_net.UniformFill(
[],
'decoder_embeddings',
shape=[
self.target_vocab_size,
self.model_params['decoder_embedding_size'],
],
min=-sqrt3,
max=sqrt3,
)
model.params.append(self.decoder_embeddings)
def model_build_fun(self, model, forward_only=False, loss_scale=None):
encoder_inputs = model.net.AddExternalInput(
workspace.GetNameScope() + 'encoder_inputs',
)
encoder_lengths = model.net.AddExternalInput(
workspace.GetNameScope() + 'encoder_lengths',
)
decoder_inputs = model.net.AddExternalInput(
workspace.GetNameScope() + 'decoder_inputs',
)
decoder_lengths = model.net.AddExternalInput(
workspace.GetNameScope() + 'decoder_lengths',
)
targets = model.net.AddExternalInput(
workspace.GetNameScope() + 'targets',
)
target_weights = model.net.AddExternalInput(
workspace.GetNameScope() + 'target_weights',
)
attention_type = self.model_params['attention']
assert attention_type in ['none', 'regular', 'dot']
(
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
) = seq2seq_util.build_embedding_encoder(
model=model,
encoder_params=self.encoder_params,
num_decoder_layers=len(self.model_params['decoder_layer_configs']),
inputs=encoder_inputs,
input_lengths=encoder_lengths,
vocab_size=self.source_vocab_size,
embeddings=self.encoder_embeddings,
embedding_size=self.model_params['encoder_embedding_size'],
use_attention=(attention_type != 'none'),
num_gpus=self.num_gpus,
)
(
decoder_outputs,
decoder_output_size,
) = seq2seq_util.build_embedding_decoder(
model,
decoder_layer_configs=self.model_params['decoder_layer_configs'],
inputs=decoder_inputs,
input_lengths=decoder_lengths,
encoder_lengths=encoder_lengths,
encoder_outputs=encoder_outputs,
weighted_encoder_outputs=weighted_encoder_outputs,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
encoder_units_per_layer=encoder_units_per_layer,
vocab_size=self.target_vocab_size,
embeddings=self.decoder_embeddings,
embedding_size=self.model_params['decoder_embedding_size'],
attention_type=attention_type,
forward_only=False,
num_gpus=self.num_gpus,
)
output_logits = seq2seq_util.output_projection(
model=model,
decoder_outputs=decoder_outputs,
decoder_output_size=decoder_output_size,
target_vocab_size=self.target_vocab_size,
decoder_softmax_size=self.model_params['decoder_softmax_size'],
)
targets, _ = model.net.Reshape(
[targets],
['targets', 'targets_old_shape'],
shape=[-1],
)
target_weights, _ = model.net.Reshape(
[target_weights],
['target_weights', 'target_weights_old_shape'],
shape=[-1],
)
_, loss_per_word = model.net.SoftmaxWithLoss(
[output_logits, targets, target_weights],
['OutputProbs_INVALID', 'loss_per_word'],
only_loss=True,
)
num_words = model.net.SumElements(
[target_weights],
'num_words',
)
total_loss_scalar = model.net.Mul(
[loss_per_word, num_words],
'total_loss_scalar',
)
total_loss_scalar_weighted = model.net.Scale(
[total_loss_scalar],
'total_loss_scalar_weighted',
scale=1.0 / self.batch_size,
)
return [total_loss_scalar_weighted]
def forward_model_build_fun(self, model, loss_scale=None):
return self.model_build_fun(
model=model,
forward_only=True,
loss_scale=loss_scale
)
def _calc_norm_ratio(self, model, params, scope, ONE):
with core.NameScope(scope):
grad_squared_sums = []
for i, param in enumerate(params):
logger.info(param)
grad = (
model.param_to_grad[param]
if not isinstance(
model.param_to_grad[param],
core.GradientSlice,
) else model.param_to_grad[param].values
)
grad_squared = model.net.Sqr(
[grad],
'grad_{}_squared'.format(i),
)
grad_squared_sum = model.net.SumElements(
grad_squared,
'grad_{}_squared_sum'.format(i),
)
grad_squared_sums.append(grad_squared_sum)
grad_squared_full_sum = model.net.Sum(
grad_squared_sums,
'grad_squared_full_sum',
)
global_norm = model.net.Pow(
grad_squared_full_sum,
'global_norm',
exponent=0.5,
)
clip_norm = model.param_init_net.ConstantFill(
[],
'clip_norm',
shape=[],
value=float(self.model_params['max_gradient_norm']),
)
max_norm = model.net.Max(
[global_norm, clip_norm],
'max_norm',
)
norm_ratio = model.net.Div(
[clip_norm, max_norm],
'norm_ratio',
)
return norm_ratio
def _apply_norm_ratio(
self, norm_ratio, model, params, learning_rate, scope, ONE
):
for param in params:
param_grad = model.param_to_grad[param]
nlr = model.net.Negative(
[learning_rate],
'negative_learning_rate',
)
with core.NameScope(scope):
update_coeff = model.net.Mul(
[nlr, norm_ratio],
'update_coeff',
broadcast=1,
)
if isinstance(param_grad, core.GradientSlice):
param_grad_values = param_grad.values
model.net.ScatterWeightedSum(
[
param,
ONE,
param_grad.indices,
param_grad_values,
update_coeff,
],
param,
)
else:
model.net.WeightedSum(
[
param,
ONE,
param_grad,
update_coeff,
],
param,
)
def norm_clipped_grad_update(self, model, scope):
if self.num_gpus == 0:
learning_rate = self.learning_rate
else:
learning_rate = model.CopyCPUToGPU(self.learning_rate, 'LR')
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
if not isinstance(
model.param_to_grad[param],
core.GradientSlice,
):
params.append(param)
ONE = model.param_init_net.ConstantFill(
[],
'ONE',
shape=[1],
value=1.0,
)
logger.info('Dense trainable variables: ')
norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)
self._apply_norm_ratio(
norm_ratio, model, params, learning_rate, scope, ONE
)
def norm_clipped_sparse_grad_update(self, model, scope):
learning_rate = self.learning_rate
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
if isinstance(
model.param_to_grad[param],
core.GradientSlice,
):
params.append(param)
ONE = model.param_init_net.ConstantFill(
[],
'ONE',
shape=[1],
value=1.0,
)
logger.info('Sparse trainable variables: ')
norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)
self._apply_norm_ratio(
norm_ratio, model, params, learning_rate, scope, ONE
)
def total_loss_scalar(self):
if self.num_gpus == 0:
return workspace.FetchBlob('total_loss_scalar')
else:
total_loss = 0
for i in range(self.num_gpus):
name = 'gpu_{}/total_loss_scalar'.format(i)
gpu_loss = workspace.FetchBlob(name)
total_loss += gpu_loss
return total_loss
def _init_model(self):
workspace.RunNetOnce(self.model.param_init_net)
def create_net(net):
workspace.CreateNet(
net,
input_blobs=[str(i) for i in net.external_inputs],
)
create_net(self.model.net)
create_net(self.forward_net)
def __init__(
self,
model_params,
source_vocab_size,
target_vocab_size,
num_gpus=1,
num_cpus=1,
):
self.model_params = model_params
self.encoder_type = 'rnn'
self.encoder_params = model_params['encoder_type']
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.num_gpus = num_gpus
self.num_cpus = num_cpus
self.batch_size = model_params['batch_size']
workspace.GlobalInit([
'caffe2',
# NOTE: modify log level for debugging purposes
'--caffe2_log_level=0',
# NOTE: modify log level for debugging purposes
'--v=0',
# Fail gracefully if one of the threads fails
'--caffe2_handle_executor_threads_exceptions=1',
'--caffe2_mkl_num_threads=' + str(self.num_cpus),
])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
workspace.ResetWorkspace()
def initialize_from_scratch(self):
logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Start')
self._build_model(init_params=True)
self._init_model()
logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Finish')
def get_current_step(self):
return workspace.FetchBlob(self.global_step)[0]
def inc_current_step(self):
workspace.FeedBlob(
self.global_step,
np.array([self.get_current_step() + 1]),
)
def step(
self,
batch,
forward_only
):
if self.num_gpus < 1:
batch_obj = prepare_batch(batch)
for batch_obj_name, batch_obj_value in zip(
Batch._fields,
batch_obj,
):
workspace.FeedBlob(batch_obj_name, batch_obj_value)
else:
for i in range(self.num_gpus):
gpu_batch = batch[i::self.num_gpus]
batch_obj = prepare_batch(gpu_batch)
for batch_obj_name, batch_obj_value in zip(
Batch._fields,
batch_obj,
):
name = 'gpu_{}/{}'.format(i, batch_obj_name)
if batch_obj_name in ['encoder_inputs', 'decoder_inputs']:
dev = core.DeviceOption(caffe2_pb2.CPU)
else:
dev = core.DeviceOption(workspace.GpuDeviceType, i)
workspace.FeedBlob(name, batch_obj_value, device_option=dev)
if forward_only:
workspace.RunNet(self.forward_net)
else:
workspace.RunNet(self.model.net)
self.inc_current_step()
return self.total_loss_scalar()
def save(self, checkpoint_path_prefix, current_step):
checkpoint_path = '{0}-{1}'.format(
checkpoint_path_prefix,
current_step,
)
assert workspace.RunOperatorOnce(core.CreateOperator(
'Save',
self.model.GetAllParams(),
[],
absolute_path=True,
db=checkpoint_path,
db_type='minidb',
))
checkpoint_config_path = os.path.join(
os.path.dirname(checkpoint_path_prefix),
'checkpoint',
)
with open(checkpoint_config_path, 'w') as checkpoint_config_file:
checkpoint_config_file.write(
'model_checkpoint_path: "' + checkpoint_path + '"\n'
'all_model_checkpoint_paths: "' + checkpoint_path + '"\n'
)
logger.info('Saved checkpoint file to ' + checkpoint_path)
return checkpoint_path
def gen_batches(source_corpus, target_corpus, source_vocab, target_vocab,
batch_size, max_length):
with open(source_corpus) as source, open(target_corpus) as target:
parallel_sentences = []
for source_sentence, target_sentence in zip(source, target):
numerized_source_sentence = seq2seq_util.get_numberized_sentence(
source_sentence,
source_vocab,
)
numerized_target_sentence = seq2seq_util.get_numberized_sentence(
target_sentence,
target_vocab,
)
if (
len(numerized_source_sentence) > 0 and
len(numerized_target_sentence) > 0 and
(
max_length is None or (
len(numerized_source_sentence) <= max_length and
len(numerized_target_sentence) <= max_length
)
)
):
parallel_sentences.append((
numerized_source_sentence,
numerized_target_sentence,
))
parallel_sentences.sort(key=lambda s_t: (len(s_t[0]), len(s_t[1])))
batches, batch = [], []
for sentence_pair in parallel_sentences:
batch.append(sentence_pair)
if len(batch) >= batch_size:
batches.append(batch)
batch = []
if len(batch) > 0:
while len(batch) < batch_size:
batch.append(batch[-1])
assert len(batch) == batch_size
batches.append(batch)
random.shuffle(batches)
return batches
def run_seq2seq_model(args, model_params=None):
source_vocab = seq2seq_util.gen_vocab(
args.source_corpus,
args.unk_threshold,
)
target_vocab = seq2seq_util.gen_vocab(
args.target_corpus,
args.unk_threshold,
)
logger.info('Source vocab size {}'.format(len(source_vocab)))
logger.info('Target vocab size {}'.format(len(target_vocab)))
batches = gen_batches(args.source_corpus, args.target_corpus, source_vocab,
target_vocab, model_params['batch_size'],
args.max_length)
logger.info('Number of training batches {}'.format(len(batches)))
batches_eval = gen_batches(args.source_corpus_eval, args.target_corpus_eval,
source_vocab, target_vocab,
model_params['batch_size'], args.max_length)
logger.info('Number of eval batches {}'.format(len(batches_eval)))
with Seq2SeqModelCaffe2(
model_params=model_params,
source_vocab_size=len(source_vocab),
target_vocab_size=len(target_vocab),
num_gpus=args.num_gpus,
num_cpus=20,
) as model_obj:
model_obj.initialize_from_scratch()
for i in range(args.epochs):
logger.info('Epoch {}'.format(i))
total_loss = 0
for batch in batches:
total_loss += model_obj.step(
batch=batch,
forward_only=False,
)
logger.info('\ttraining loss {}'.format(total_loss))
total_loss = 0
for batch in batches_eval:
total_loss += model_obj.step(
batch=batch,
forward_only=True,
)
logger.info('\teval loss {}'.format(total_loss))
if args.checkpoint is not None:
model_obj.save(args.checkpoint, i)
def main():
random.seed(31415)
parser = argparse.ArgumentParser(
description='Caffe2: Seq2Seq Training'
)
parser.add_argument('--source-corpus', type=str, default=None,
help='Path to source corpus in a text file format. Each '
'line in the file should contain a single sentence',
required=True)
parser.add_argument('--target-corpus', type=str, default=None,
help='Path to target corpus in a text file format',
required=True)
parser.add_argument('--max-length', type=int, default=None,
help='Maximal lengths of train and eval sentences')
parser.add_argument('--unk-threshold', type=int, default=50,
                        help='Frequency threshold at or below which a token '
                             'is replaced by the unknown token')
parser.add_argument('--batch-size', type=int, default=32,
help='Training batch size')
parser.add_argument('--epochs', type=int, default=10,
help='Number of iterations over training data')
parser.add_argument('--learning-rate', type=float, default=0.5,
help='Learning rate')
parser.add_argument('--max-gradient-norm', type=float, default=1.0,
                        help='Max global norm of gradients at the end of each '
                             'backward pass; gradients are scaled down so that '
                             'their global norm does not exceed this value.')
parser.add_argument('--num-gpus', type=int, default=0,
help='Number of GPUs for data parallel model')
parser.add_argument('--use-bidirectional-encoder', action='store_true',
help='Set flag to use bidirectional recurrent network '
'for first layer of encoder')
parser.add_argument('--use-attention', action='store_true',
help='Set flag to use seq2seq with attention model')
parser.add_argument('--source-corpus-eval', type=str, default=None,
help='Path to source corpus for evaluation in a text '
'file format', required=True)
parser.add_argument('--target-corpus-eval', type=str, default=None,
help='Path to target corpus for evaluation in a text '
'file format', required=True)
parser.add_argument('--encoder-cell-num-units', type=int, default=512,
help='Number of cell units per encoder layer')
parser.add_argument('--encoder-num-layers', type=int, default=2,
                        help='Number of encoder layers')
parser.add_argument('--decoder-cell-num-units', type=int, default=512,
help='Number of cell units in the decoder layer')
parser.add_argument('--decoder-num-layers', type=int, default=2,
                        help='Number of decoder layers')
parser.add_argument('--encoder-embedding-size', type=int, default=256,
help='Size of embedding in the encoder layer')
parser.add_argument('--decoder-embedding-size', type=int, default=512,
help='Size of embedding in the decoder layer')
parser.add_argument('--decoder-softmax-size', type=int, default=None,
help='Size of softmax layer in the decoder')
parser.add_argument('--checkpoint', type=str, default=None,
help='Path to checkpoint')
args = parser.parse_args()
encoder_layer_configs = [
dict(
num_units=args.encoder_cell_num_units,
),
] * args.encoder_num_layers
if args.use_bidirectional_encoder:
assert args.encoder_cell_num_units % 2 == 0
        encoder_layer_configs[0]['num_units'] //= 2  # keep num_units an integer
decoder_layer_configs = [
dict(
num_units=args.decoder_cell_num_units,
),
] * args.decoder_num_layers
run_seq2seq_model(args, model_params=dict(
attention=('regular' if args.use_attention else 'none'),
decoder_layer_configs=decoder_layer_configs,
encoder_type=dict(
encoder_layer_configs=encoder_layer_configs,
use_bidirectional_encoder=args.use_bidirectional_encoder,
),
batch_size=args.batch_size,
optimizer_params=dict(
learning_rate=args.learning_rate,
),
encoder_embedding_size=args.encoder_embedding_size,
decoder_embedding_size=args.decoder_embedding_size,
decoder_softmax_size=args.decoder_softmax_size,
max_gradient_norm=args.max_gradient_norm,
))
if __name__ == '__main__':
main()
|
pytorch-master
|
caffe2/python/models/seq2seq/train.py
|
## @package formatter
# Module caffe2.python.docs.formatter
from caffe2.python.docs.parser import Parser
class Formatter(object):
def __init__(self):
self.content = ""
def clone(self):
return self.__class__()
def dump(self):
return self.content
def parseAndAdd(self, text):
text = Parser(text, self).parse()
self.addRaw(text)
def addRaw(self, text):
raise Exception('Not yet implemented.')
def addLine(self, text):
raise Exception('Not yet implemented.')
def addLinebreak(self):
raise Exception('Not yet implemented.')
def addHeader(self, text):
raise Exception('Not yet implemented.')
def addEmphasis(self, text):
raise Exception('Not yet implemented.')
def addList(self, textList):
raise Exception('Not yet implemented.')
def addLink(self, text, url):
raise Exception('Not yet implemented.')
def addCode(self, text):
raise Exception('Not yet implemented.')
def addCodeLink(self, text):
raise Exception('Not yet implemented.')
def addTable(self, table):
raise Exception('Not yet implemented.')
def addBreak(self):
raise Exception('Not yet implemented.')
class Markdown(Formatter):
def addRaw(self, text):
self.content += "{text}".format(text=text)
def addLine(self, text, new_line=False):
self.content += "{line}{text}\n".format(line=('\n' if new_line else ''),
text=text)
def addLinebreak(self):
self.content += "\n"
def addHeader(self, text, h=1):
self.addLine("{header} {text}".format(header=h * '#', text=text), True)
def addEmphasis(self, text, s=1):
self.addRaw("{stars}{text}{stars}".format(stars=s * '*', text=text))
def addList(self, textList):
for text in textList:
self.addLine("- {text}".format(text=text), True)
self.addLinebreak()
def addLink(self, text, url):
self.addRaw("[{text}]({url})".format(text=text, url=url))
def addCodeLink(self, path, options=None):
self.addRaw("({path})".format(path=path))
def addCode(self, text, inline=False):
if (inline):
self.content += "`{text}`".format(text=text)
else:
self.addRaw("\n\n```\n{text}```\n\n".format(text=text))
def addTable(self, table, noTitle=False):
self.addLinebreak()
assert(len(table) > 1)
if noTitle:
table.insert(0, [' ' for i in range(len(table[0]))])
self.addLine(' | '.join(table[0]))
self.addLine(' | '.join(['----' for i in range(len(table[0]))]))
for row in table[1:]:
self.addLine(' | '.join(row))
self.addLinebreak()
def addBreak(self):
self.addLine('\n---\n', True)
|
pytorch-master
|
caffe2/python/docs/formatter.py
|
pytorch-master
|
caffe2/python/docs/__init__.py
|
|
## @package parser
# Module caffe2.python.docs.parser
import re
class Parser(object):
# List of tuples (regex_str, lambda(regex_match, formatter))
# If a lambda returns True it will be called repeatedly with replacement
# otherwise it will only be called on text that hasn't been parsed yet.
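    # (An illustrative end-to-end demo of these rules appears at the end of
    # this module.)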
regexes = [
# Code blocks of various formats
('````(.+?)````',
lambda m, f: f.addCode(m.group(1))
),
('```(.+?)```',
lambda m, f: f.addCode(m.group(1))
),
(r'((( {2})+)(\S.*)(\n\s*\n|\n))+',
lambda m, f: f.addCode(m.group(0))
),
(r'([^\.])\n',
lambda m, f: f.addRaw('{c} '.format(c=m.group(1))) or True
),
('`(.+?)`',
lambda m, f: f.addCode(m.group(1), True)
),
# Make links clickable
('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
lambda m, f: f.addLink(m.group(0), m.group(0))
),
(r'\*\*(.+?)\*\*',
lambda m, f: f.addEmphasis(m.group(1), 2)
),
(r'\*(.+?)\*',
lambda m, f: f.addEmphasis(m.group(1), 1)
),
]
def __init__(self, text, formatter):
self.text = text
self.lines = []
self.formatter = formatter
def parseText(self):
UNPARSED = 0
PARSED = 1
parsed_block = [(UNPARSED, self.text)]
for regex, func in self.regexes:
index = 0
while index < len(parsed_block):
label, text = parsed_block[index]
# Already been parsed
if (label == PARSED):
index += 1
continue
match = re.search(regex, text)
if match:
parsed_block.pop(index)
start = match.start(0)
end = match.end(0)
f = self.formatter.clone()
merge = func(match, f)
if merge:
merged = text[:start] + f.dump() + text[end:]
parsed_block.insert(index, (UNPARSED, merged))
else:
if text[:start]:
parsed_block.insert(index,
(UNPARSED, text[:start]))
index += 1
parsed_block.insert(index, (PARSED, f.dump()))
index += 1
if text[end:]:
parsed_block.insert(index,
(UNPARSED, text[end:]))
else:
index += 1
self.lines += [i for _, i in parsed_block]
self.text = ' '.join(self.lines)
def parse(self):
self.parseText()
return self.text
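def _markdown_parse_demo():
    # Illustrative sketch only (hedged, not part of the original module):
    # parses a small docstring-style snippet with the Markdown formatter so
    # the (regex_str, lambda) rules above can be seen end to end. The import
    # is deferred to avoid a circular import with caffe2.python.docs.formatter.
    from caffe2.python.docs.formatter import Markdown
    text = 'Computes the **absolute value** of the input, see the `Abs` op.'
    # Returns the snippet rewritten with Markdown emphasis and inline code.
    return Parser(text, Markdown()).parse()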
|
pytorch-master
|
caffe2/python/docs/parser.py
|
## @package generator
# Module caffe2.python.docs.generator
import argparse
import os
from caffe2.python import core, workspace
from caffe2.python.docs.formatter import Markdown
from future.utils import viewitems, viewvalues
OpSchema = workspace.C.OpSchema
class DocUploader(object):
def __init__(self):
pass
def upload(self, text):
pass
class DocGenerator(object):
def __init__(self, formatter, uploader):
self.formatter = formatter
self.uploader = uploader
self.content_body = ""
def create_body(self):
pass
def update(self):
self.uploader.upload(self.content_body)
class OpDocGenerator(DocGenerator):
def getOperatorDoc(self, name, schema, priority):
return OperatorDoc(name, schema, priority)
def getOperatorEngine(self, name):
return OperatorEngine(name)
def getOperators(self):
# map: op_name -> operator
self.operators = {}
# map: op_name -> [engine, engine]
self.engines = {}
def filePriority(x):
if x == "caffe2/caffe2/operators":
return 0
if 'contrib' in x.split('/'):
return 2
if 'experiments' in x.split('/'):
return 3
return 1
for name in core._GetRegisteredOperators():
schema = OpSchema.get(name)
if schema:
priority = filePriority(os.path.dirname(schema.file))
operator = self.getOperatorDoc(name, schema, priority)
self.operators[name] = operator
# Engine
elif name.find("_ENGINE_") != -1:
engine = self.getOperatorEngine(name)
if engine.base_op_name in self.engines:
self.engines[engine.base_op_name].append(engine)
else:
self.engines[engine.base_op_name] = [engine]
# No schema
else:
priority = 4
self.operators[name] = self.getOperatorDoc(name, schema, priority)
for name, engines in viewitems(self.engines):
if name in self.operators:
self.operators[name].addEngines(engines)
# Generate a sorted list of operators
return sorted(
viewvalues(self.operators),
key=lambda op: (op.priority, op.name)
)
def createBody(self):
operators = self.getOperators()
for operator in operators:
operator.generateSchema(self.formatter)
self.content_body += self.formatter.dump()
class OperatorEngine(object):
def __init__(self, name):
self.op_name = name
self.base_op_name, self.engine = name.split("_ENGINE_", 1)
def getDeviceImpl(self):
deviceImplList = []
for device, impl in [('CPU', OpSchema.get_cpu_impl(self.op_name)),
('CUDA', OpSchema.get_cuda_impl(self.op_name))]:
if not impl:
continue
deviceImplList.append((device, impl))
return deviceImplList
def generateDoc(self, formatter):
for device, impl in self.getDeviceImpl():
formatter.addLine(
'{engine} on {device}: {impl}'.format(engine=self.engine,
device=device,
impl=impl))
class OperatorDoc(object):
def __init__(self, name, schema, priority):
self.name = name
self.schema = schema
self.priority = priority
print("Gathering docs for {}...".format(self.name))
self.engines = []
def addEngines(self, engines):
self.engines = engines
def generateDoc(self, formatter):
if self.schema.doc:
formatter.parseAndAdd(self.schema.doc)
formatter.addLinebreak()
else:
formatter.addLine("No documentation yet.")
def generateTable(self, formatter, tuples, title_row, title):
if tuples:
if title:
formatter.addHeader(title, 3)
table = []
if title_row:
table = [title_row]
for name, doc in tuples:
table.append([name, doc or ''])
formatter.addTable(table, (table == []))
def generateInterface(self, formatter):
def makeDesc(title, args):
f = formatter.clone()
f.addEmphasis(title, 1)
out = [(f.dump(), '')]
for arg in args:
f = formatter.clone()
if isinstance(arg, tuple):
name = arg[0]
if len(arg) > 1:
description = arg[1] or ''
else:
description = ''
else:
name = arg.name
description = arg.description or ''
f.addCode(name, inline=True)
out.append((f.dump(), description or ''))
return out
tuples = []
if self.schema.args:
tuples += makeDesc('Arguments', self.schema.args)
if self.schema.input_desc:
tuples += makeDesc('Inputs', self.schema.input_desc)
if self.schema.output_desc:
tuples += makeDesc('Outputs', self.schema.output_desc)
self.generateTable(formatter, tuples, None, 'Interface')
print("Generated interface for {}".format(self.name))
def generateCodeLink(self, formatter):
formatter.addHeader("Code", 3)
formatter.addLinebreak()
formatter.addCodeLink(self.schema.file)
def getInfo(self, formatter, name, impl):
pass
def generateDevices(self, formatter):
formatter.addHeader("Devices", 3)
devices = [
self.getInfo(formatter,
'CPU', OpSchema.get_cpu_impl(self.name)),
self.getInfo(formatter,
'GPU', OpSchema.get_cuda_impl(self.name)),
]
formatter.addList([i for i in devices if i])
def generateEngines(self, formatter):
if not len(self.engines):
return
formatter.addHeader("Engines", 3)
for engine in self.engines:
engine.generateDoc(formatter)
def generateSchema(self, formatter):
formatter.addHeader(self.name, 2)
if self.schema:
self.generateDoc(formatter)
self.generateInterface(formatter)
self.generateCodeLink(formatter)
self.generateDevices(formatter)
self.generateEngines(formatter)
formatter.addBreak()
else:
formatter.addLine("No schema documented yet.")
self.generateDevices(formatter)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Operators catalog generator.")
parser.add_argument('catalog_path', type=str,
help='operators-catalogue.md to write out to')
args = parser.parse_args()
with open(args.catalog_path, 'w') as fp:
ops = OpDocGenerator(Markdown(), DocUploader())
ops.createBody()
fp.write(ops.content_body)
|
pytorch-master
|
caffe2/python/docs/generator.py
|
## @package github
# Module caffe2.python.docs.github
import argparse
import os
from caffe2.python.docs.formatter import Markdown
from caffe2.python.docs.generator import OpDocGenerator, DocUploader
from caffe2.python.docs.generator import OperatorDoc, OperatorEngine
class GHOpDocUploader(DocUploader):
def __init__(self):
pass
def upload(self, content_body):
print(content_body)
class GHMarkdown(Markdown):
def addHeader(self, text, h=1):
self.addLine("\n{header} {text}\n".format(header=h * '#', text=text), True)
def addDocHeader(self):
self.addLine("---")
self.addLine("docid: operators-catalog")
self.addLine("title: Operators Catalog")
self.addLine("layout: operators")
self.addLine("permalink: /docs/operators-catalogue.html")
self.addLine("---")
self.addLine("* TOC")
self.addLine("{:toc}")
def addTable(self, table, noTitle=False):
self.addLinebreak()
assert(len(table) > 1)
self.addLine(' | '.join(table[0]))
self.addLine(' | '.join(['----------' for i in range(len(table[0]))]))
for row in table[1:]:
self.addLine(' | '.join(row))
def addTableHTML(self, table, noTitle=False):
self.addRaw("<table>")
for row in table:
self.addRaw("<tr>")
for cell in row:
self.addRaw("<td>")
self.addLine("{cell}".format(cell=cell))
self.addRaw("</td>")
self.addRaw("</tr>")
self.addRaw("</table>")
def getCodeLink(formatter, schema):
formatter = formatter.clone()
path = os.path.relpath(schema.file, "caffe2")
schemaLink = ('https://github.com/pytorch/pytorch/blob/master/{path}'
.format(path=path))
formatter.addLink('{path}'.format(path=path), schemaLink)
return formatter.dump()
class GHOperatorEngine(OperatorEngine):
def generateDoc(self, formatter):
for device, _ in self.getDeviceImpl():
formatter.addCode('{engine}'.format(engine=self.engine), True)
if device:
formatter.addRaw(' on ')
formatter.addEmphasis("{device}".format(device=device), 1)
class GHOperatorDoc(OperatorDoc):
def generateCodeLink(self, formatter):
formatter.addHeader("Code", 3)
formatter.addLinebreak()
formatter.addRaw(getCodeLink(formatter, self.schema))
def getInfo(self, formatter, name, impl):
formatter = formatter.clone()
if impl:
formatter.addEmphasis('{name}'.format(name=name), 1)
formatter.addRaw(' ')
formatter.addCode('{impl}'.format(impl=impl), True)
return formatter.dump()
def generateSchema(self, formatter):
formatter.addHeader(self.name, 2)
if self.schema:
self.generateDoc(formatter)
self.generateInterface(formatter)
self.generateCodeLink(formatter)
formatter.addBreak()
else:
formatter.addLine("No schema documented yet.")
class GHOpDocGenerator(OpDocGenerator):
def getOperatorDoc(self, name, schema, priority):
return GHOperatorDoc(name, schema, priority)
def getOperatorEngine(self, name):
return GHOperatorEngine(name)
def createBody(self):
self.formatter.addDocHeader()
operators = self.getOperators()
for operator in operators:
operator.generateSchema(self.formatter)
self.content_body += self.formatter.dump()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Operators catalog generator.")
parser.add_argument('catalog_path', type=str,
help='operators-catalogue.md to write out to')
args = parser.parse_args()
with open(args.catalog_path, 'w') as fp:
ops = GHOpDocGenerator(GHMarkdown(), GHOpDocUploader())
ops.createBody()
fp.write(ops.content_body)
print("Updated {}!".format(args.catalog_path))
|
pytorch-master
|
caffe2/python/docs/github.py
|
import ctypes
import os
if 'OSS_ONNXIFI_LIB' in os.environ:
lib = os.environ['OSS_ONNXIFI_LIB']
print("Loading ONNXIFI lib: ".format(lib))
ctypes.CDLL(lib, ctypes.RTLD_GLOBAL)
|
pytorch-master
|
caffe2/python/fakelowp/init_shared_libs.py
|
import sys
import numpy as np
def print_test_debug_info(testname, items_dict):
filename = "debug_operator_onnxifi_" + testname + ".txt"
np.set_printoptions(threshold=sys.maxsize)
with open(filename, 'w') as f:
for key, value in items_dict.items():
print(key, value)
f.write("{}\n".format(key))
f.write("{}\n".format(value))
def print_net(net):
for i in net.external_input:
print("Input: {}".format(i))
for i in net.external_output:
print("Output: {}".format(i))
for op in net.op:
print("Op {}".format(op.type))
for x in op.input:
print(" input: {}".format(x))
for y in op.output:
print(" output: {}".format(y))
def _sigmoid(x):
return 1. / (1. + np.exp(np.float64(-x)))
def _tanh(x):
return np.tanh(np.float64(x))
def _swish(x):
return np.float64(x) * _sigmoid(x)
def _gelu_by_sigmoid(x):
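# Sigmoid-based GELU approximation: gelu(x) ~= x * sigmoid(1.702 * x).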
return np.float64(x) / (1. + np.exp(np.float64(-x) * 1.702))
def _acc_func(opname, x):
if opname == "Swish":
return _swish(x)
elif opname == "Sigmoid":
return _sigmoid(x)
elif opname == "Tanh":
return _tanh(x)
elif opname == "Gelu":
return _gelu_by_sigmoid(x)
else:
return x
def _get_ulp16(x):
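# Approximate the fp16 unit in the last place (ulp) of x: clamp magnitudes below
# the smallest normal value (2**-14) and use the 10-bit mantissa, giving
# ulp = 2**(floor(log2(|x|)) - 10).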
abs_x = np.abs(x)
mask = (abs_x > 2.**(-14))
abs_x = mask * abs_x + (1 - mask) * 2.**(-14)
k = np.floor(np.log2(abs_x))
return 2.**(k - 10)
def compute_ulp_error(opname, xvec, y_nnpi):
y_acc = _acc_func(opname, np.float64(xvec))
scale = 1. / _get_ulp16(y_acc)
return (y_nnpi - y_acc) * scale
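# Minimal usage sketch: compare a float32-rounded sigmoid against the float64
# reference and report the error in fp16 ulps. The inputs below are made up
# purely for demonstration.
if __name__ == "__main__":
    x = np.linspace(-4.0, 4.0, 9).astype(np.float32)
    y_device = np.float32(_acc_func("Sigmoid", x))  # stand-in for device output
    print(compute_ulp_error("Sigmoid", x, y_device))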
|
pytorch-master
|
caffe2/python/fakelowp/test_utils.py
|
pytorch-master
|
caffe2/python/fakelowp/__init__.py
|
|
from caffe2.python import core, schema, muji
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class ComputeNormForBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to compute norms for
certain blobs.
Args:
blobs: list of blobs to compute norm for
logging_frequency: frequency for printing norms to logs
p: type of norm. Currently it supports p=1 or p=2
compute_averaged_norm: whether to report the norm or the averaged norm (averaged_norm = norm / size)
row_index: plot the entire blob, or only the row at the given row_index
"""
def __init__(self, blobs, logging_frequency, p=2, compute_averaged_norm=False, row_index=None):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._p = p
self._compute_averaged_norm = compute_averaged_norm
self._field_name_suffix = '_l{}_norm'.format(p)
if compute_averaged_norm:
self._field_name_suffix = '_averaged' + self._field_name_suffix
if row_index is not None and row_index < 0:
raise Exception('{0} is not a valid row index, row_index should be >= 0'.format(
row_index))
self.row_index = row_index
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
p = self._p
compute_averaged_norm = self._compute_averaged_norm
row_index = self.row_index
CPU = muji.OnCPU()
# if given, blob_to_device is a map from blob to device_option
blob_to_device = blob_to_device or {}
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
if blob in blob_to_device:
device = blob_to_device[blob]
else:
device = CPU
with core.DeviceScope(device):
if row_index is not None and row_index >= 0:
blob = net.Slice(
[blob],
net.NextScopedBlob(prefix=blob + '_row_{0}'.format(row_index)),
starts=[row_index, 0],
ends=[row_index + 1, -1]
)
cast_blob = net.Cast(
blob,
net.NextScopedBlob(prefix=blob + '_float'),
to=core.DataType.FLOAT
)
norm_name = net.NextScopedBlob(prefix=blob + self._field_name_suffix)
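# Note: LpNorm with p=2 returns the sum of squares (the squared L2 norm); the
# blob is reported as-is, without taking a square root (see the accompanying
# compute_norm_for_blobs test).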
norm = net.LpNorm(
cast_blob, norm_name, p=p, average=compute_averaged_norm
)
norm_stop_gradient = net.StopGradient(norm, net.NextScopedBlob(norm_name + "_stop_gradient"))
if self._logging_frequency >= 1:
net.Print(norm, [], every_n=self._logging_frequency)
if modify_output_record:
output_field_name = str(blob) + self._field_name_suffix
output_scalar = schema.Scalar((np.float64, (1,)), norm)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(
output_field_name,
output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
|
pytorch-master
|
caffe2/python/modeling/compute_norm_for_blobs.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.gradient_clipping import GradientClipping
import numpy as np
class GradientClippingTest(unittest.TestCase):
def test_gradient_clipping_by_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (3 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 17)
def test_gradient_clipping_by_norm_l1_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l1_norm',
clip_threshold=0.1,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (2 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 15)
def test_gradient_clipping_by_norm_using_param_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
use_parameter_norm=True,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (5 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 21)
def test_gradient_clipping_by_norm_compute_norm_ratio(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
use_parameter_norm=True,
compute_norm_ratio=True,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (6 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 23)
def test_gradient_clipping_by_value(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
clip_max = 1e-8
clip_min = 0
net_modifier = GradientClipping(
grad_clip_method='by_value',
clip_max=clip_max,
clip_min=clip_min,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (1 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 13)
fc1_w_grad = workspace.FetchBlob('fc1_w_grad')
self.assertLessEqual(np.amax(fc1_w_grad), clip_max)
self.assertGreaterEqual(np.amin(fc1_w_grad), clip_min)
def test_gradient_clipping_by_norm_including_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
blobs_to_include=['fc1_w'],
blobs_to_exclude=None
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 1 * (3 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 14)
def test_gradient_clipping_by_norm_excluding_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
blobs_to_include=None,
blobs_to_exclude=['fc1_w', 'fc2_w']
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 0 * (3 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 11)
|
pytorch-master
|
caffe2/python/modeling/gradient_clipping_test.py
|
import unittest
from caffe2.python import brew, model_helper, workspace
from caffe2.python.modeling.initializers import (
Initializer, PseudoFP16Initializer)
class InitializerTest(unittest.TestCase):
def test_fc_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=1, dim_out=1,
WeightInitializer=Initializer)
# no operator name set, will use custom
fc3 = brew.fc(model, fc2, "fc3", dim_in=1, dim_out=1,
WeightInitializer=Initializer,
weight_init=("ConstantFill", {}),
)
# operator name set, no initializer class set
fc4 = brew.fc(model, fc3, "fc4", dim_in=1, dim_out=1,
WeightInitializer=None,
weight_init=("ConstantFill", {})
)
@unittest.skipIf(not workspace.has_gpu_support, 'No GPU support')
def test_fc_fp16_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
# default operator, PseudoFP16Initializer
fc2 = brew.fc(model, fc1, "fc2", dim_in=1, dim_out=1,
WeightInitializer=PseudoFP16Initializer
)
# specified operator, PseudoFP16Initializer
fc3 = brew.fc(model, fc2, "fc3", dim_in=1, dim_out=1,
weight_init=("ConstantFill", {}),
WeightInitializer=PseudoFP16Initializer
)
def test_fc_external_initializer(self):
model = model_helper.ModelHelper(name="test", init_params=False)
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1) # noqa
self.assertEqual(len(model.net.Proto().op), 1)
self.assertEqual(len(model.param_init_net.Proto().op), 0)
|
pytorch-master
|
caffe2/python/modeling/initializers_test.py
|
pytorch-master
|
caffe2/python/modeling/__init__.py
|
|
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class ComputeHistogramForBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to compute histogram for
certain blobs.
Args:
blobs: list of blobs to compute histogram for
logging_frequency: frequency for printing histograms to logs
lower_bound: left boundary of histogram values
upper_bound: right boundary of histogram values
num_buckets: number of buckets to use in [lower_bound, upper_bound)
accumulate: boolean to output accumulate or per-batch histogram
"""
def __init__(self, blobs, logging_frequency, num_buckets=30,
lower_bound=0.0, upper_bound=1.0, accumulate=False):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._accumulate = accumulate
if self._accumulate:
self._field_name_suffix = '_acc_normalized_hist'
else:
self._field_name_suffix = '_curr_normalized_hist'
self._num_buckets = int(num_buckets)
assert self._num_buckets > 0, (
"num_buckets need to be greater than 0, got {}".format(num_buckets))
self._lower_bound = float(lower_bound)
self._upper_bound = float(upper_bound)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
blob_float = net.Cast(blob, net.NextScopedBlob(prefix=blob +
'_float'), to=core.DataType.FLOAT)
curr_hist, acc_hist = net.AccumulateHistogram(
[blob_float],
[net.NextScopedBlob(prefix=blob + '_curr_hist'),
net.NextScopedBlob(prefix=blob + '_acc_hist')],
num_buckets=self._num_buckets,
lower_bound=self._lower_bound,
upper_bound=self._upper_bound)
if self._accumulate:
hist = net.Cast(
acc_hist,
net.NextScopedBlob(prefix=blob + '_cast_hist'),
to=core.DataType.FLOAT)
else:
hist = net.Cast(
curr_hist,
net.NextScopedBlob(prefix=blob + '_cast_hist'),
to=core.DataType.FLOAT)
normalized_hist = net.NormalizeL1(
hist,
net.NextScopedBlob(prefix=blob + self._field_name_suffix)
)
if self._logging_frequency >= 1:
net.Print(normalized_hist, [], every_n=self._logging_frequency)
if modify_output_record:
output_field_name = str(blob) + self._field_name_suffix
output_scalar = schema.Scalar((np.float32, (self._num_buckets + 2,)),
normalized_hist)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(
output_field_name,
output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
|
pytorch-master
|
caffe2/python/modeling/compute_histogram_for_blobs.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class GetEntryFromBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to get a certain entry
from certain blobs.
Args:
blobs: list of blobs to get entry from
logging_frequency: frequency for printing entry values to logs
i1, i2: indices into the first and second dimension of the blob (currently
we assume the blobs are 2-dimensional). When i2 = -1, print all entries
in blob[i1]
"""
def __init__(self, blobs, logging_frequency, i1=0, i2=0):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._i1 = i1
self._i2 = i2
self._field_name_suffix = '_{0}_{1}'.format(i1, i2) if i2 >= 0 \
else '_{0}_all'.format(i1)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
i1, i2 = [self._i1, self._i2]
if i1 < 0:
raise ValueError('index is out of range')
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
blob_i1 = net.Slice([blob], starts=[i1, 0], ends=[i1 + 1, -1])
if self._i2 == -1:
blob_i1_i2 = net.Copy([blob_i1],
[net.NextScopedBlob(prefix=blob + '_{0}_all'.format(i1))])
else:
blob_i1_i2 = net.Slice([blob_i1],
net.NextScopedBlob(prefix=blob + '_{0}_{1}'.format(i1, i2)),
starts=[0, i2], ends=[-1, i2 + 1])
if self._logging_frequency >= 1:
net.Print(blob_i1_i2, [], every_n=self._logging_frequency)
if modify_output_record:
output_field_name = str(blob) + self._field_name_suffix
output_scalar = schema.Scalar((np.float64), blob_i1_i2)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(output_field_name, output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
|
pytorch-master
|
caffe2/python/modeling/get_entry_from_blobs.py
|
from caffe2.python import brew, model_helper, scope
from caffe2.python.modeling.parameter_sharing import (
ParameterSharing,
parameter_sharing_context,
)
from caffe2.python.modeling.initializers import (
Initializer
)
import unittest
class ParameterSharingTest(unittest.TestCase):
def test_parameter_sharing_default_scopes(self):
# Test no sharing default scopes
param_1 = parameter_sharing_context.get_parameter_name('w')
self.assertEquals(param_1, 'w')
with scope.NameScope('scope'):
param_2 = parameter_sharing_context.get_parameter_name('w')
self.assertEquals(param_2, 'scope/w')
with scope.NameScope('scope_2'):
param_3 = parameter_sharing_context.get_parameter_name('w')
self.assertEquals(param_3, 'scope/scope_2/w')
def test_parameter_sharing_nested_scopes(self):
# Test parameter sharing
with scope.NameScope('global_scope'):
with ParameterSharing({'model_b': 'model_a'}):
param_global = parameter_sharing_context.get_parameter_name('w')
self.assertEquals(param_global, 'global_scope/w')
# This scope is overridden to match 'model_a'
with scope.NameScope('model_b'):
with ParameterSharing({'shared_scope': ''}):
param_4 = parameter_sharing_context.get_parameter_name(
'w')
self.assertEquals(param_4, 'global_scope/model_a/w')
with scope.NameScope('shared_scope'):
param_5 = parameter_sharing_context.\
get_parameter_name('w')
self.assertEquals(param_5, 'global_scope/model_a/w')
# This scope is supposed to have not sharing
with scope.NameScope('model_c'):
with ParameterSharing({'shared_scope': ''}):
param_4 = parameter_sharing_context.get_parameter_name(
'w')
self.assertEquals(param_4, 'global_scope/model_c/w')
with scope.NameScope('shared_scope'):
param_5 = parameter_sharing_context.\
get_parameter_name('w')
self.assertEquals(param_5, 'global_scope/model_c/w')
def test_parameter_sharing_subscopes(self):
# Sharing only one of the subscopes
with ParameterSharing({'global_scope/b': 'global_scope/a'}):
with scope.NameScope('global_scope'):
param_6 = parameter_sharing_context.get_parameter_name('w')
self.assertEquals(param_6, 'global_scope/w')
with scope.NameScope('a'):
param_7 = parameter_sharing_context.get_parameter_name('w')
self.assertEquals(param_7, 'global_scope/a/w')
with scope.NameScope('b'):
param_8 = parameter_sharing_context.get_parameter_name('w')
self.assertEquals(param_8, 'global_scope/a/w')
with scope.NameScope('c'):
param_9 = parameter_sharing_context.get_parameter_name('w')
self.assertEquals(param_9, 'global_scope/c/w')
def test_create_param(self):
model = model_helper.ModelHelper(name="test")
# Test no sharing default scopes
p1 = model.create_param(
'w',
shape=[2],
initializer=Initializer("ConstantFill")
)
with scope.NameScope('some_global_scope'):
p2 = model.create_param(
'w',
shape=[2],
initializer=Initializer("ConstantFill")
)
self.assertNotEqual(model.get_param_info(p1), None)
self.assertNotEqual(model.get_param_info(p2), None)
self.assertNotEqual(model.get_param_info(p1), model.get_param_info(p2))
model.Validate()
def test_deep_hierarchy(self):
model = model_helper.ModelHelper(name="test")
with ParameterSharing({'a': 'b'}):
with scope.NameScope('a'):
with ParameterSharing({'c': 'd'}):
with scope.NameScope('c'):
with ParameterSharing({'e': 'f'}):
with scope.NameScope('e'):
p = model.create_param(
'w',
shape=[2],
initializer=Initializer("ConstantFill")
)
self.assertNotEqual(model.get_param_info(p), None)
def test_parameter_sharing_brew(self):
# Test no sharing default scopes
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=16, dim_out=16)
# Shared params are expected to share the same shape and fail if it's
# not true
with self.assertRaises(AssertionError):
_ = brew.fc(model, data, "fc1", dim_in=2, dim_out=2) # noqa
output_blobs = set()
with scope.NameScope('some_global_scope'):
with scope.NameScope('model_a'):
output_blobs.add(str(brew.fc(model, fc1, 'output', 16, 16)))
with ParameterSharing({'model_b': 'model_a'}),\
scope.NameScope('model_b'):
with ParameterSharing({'shared_1': '', 'shared_2': ''}):
# All params in DenseLayers from shared_1, shared_2 and
# model_a are shared and will be pointing to:
# [some_global_scope/model_a/output_W,
# some_global_scope/model_a/output_b]
with scope.NameScope('shared_1'):
output_blobs.add(
str(brew.fc(model, fc1, 'output', 16, 16)))
with scope.NameScope('shared_2'):
output_blobs.add(
str(brew.fc(model, fc1, 'output', 16, 16)))
# Params of this layer are not shared with anyone unless
# there is some explicit sharing with model_a/unshared (not
# in this example).
# Names of the blobs are
# [some_global_scope/model_a/unshared/output_W,
# some_global_scope/model_a/unshared/output_b]
with scope.NameScope('unshared'):
output_blobs.add(
str(brew.fc(model, fc1, 'output', 16, 16)))
self.assertEqual(len(model._parameters_info), 6)
self.assertEqual(len(output_blobs), 4)
self.assertEqual(sorted(model._parameters_info.keys()), [
'fc1_b',
'fc1_w',
'some_global_scope/model_a/output_b',
'some_global_scope/model_a/output_w',
'some_global_scope/model_a/unshared/output_b',
'some_global_scope/model_a/unshared/output_w',
])
model.Validate()
|
pytorch-master
|
caffe2/python/modeling/parameter_sharing_test.py
|
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class ComputeStatisticsForBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to compute statistics
for certain blobs. For each blob in the list, its min, max, mean and standard
deviation will be computed.
Args:
blobs: list of blobs to compute statistics for
logging_frequency: frequency for printing statistics to logs
"""
def __init__(self, blobs, logging_frequency):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._field_name_suffix = '_summary'
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
assert net.BlobIsDefined(blob), 'blob {} is not defined in net {} whose proto is {}'.format(blob, net.Name(), net.Proto())
cast_blob = net.Cast(blob, to=core.DataType.FLOAT)
stats_name = net.NextScopedBlob(prefix=blob + self._field_name_suffix)
stats = net.Summarize(cast_blob, stats_name, to_file=0)
net.Print(stats, [], every_n=self._logging_frequency)
if modify_output_record:
output_field_name = str(blob) + self._field_name_suffix
output_scalar = schema.Scalar((np.float64, (1,)), stats)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(
output_field_name,
output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
|
pytorch-master
|
caffe2/python/modeling/compute_statistics_for_blobs.py
|
from caffe2.python.core import DataType, BlobReference, ScopedBlobReference
from caffe2.python.modeling.parameter_info import ParameterInfo
class Initializer(object):
'''
This class abstracts out parameter creation. One can come up with a new
Initializer in order to implement more complex parameter initialization logic
'''
def __init__(self, operator_name=None, **kwargs):
self.operator_name = operator_name
self.operator_kwargs = kwargs
def update(self, operator_name, kwargs):
if self.operator_name is not None:
raise Exception("Operator name overwrites are not allowed")
self.operator_name = operator_name
self.operator_kwargs = kwargs
def create_param(self, param_name, init_net, shape):
param = init_net.__getattr__(self.operator_name)(
[], param_name, shape=shape, **self.operator_kwargs)
return ParameterInfo(
param_id=None,
param=param,
shape=shape,
)
class ExternalInitializer(object):
'''
This class is used in cases when the parameter should not be initialized by
the initializer, but rather provided in the workspace when param_init_net is
executed.
The current version does not do any real sanity checks on the parameter.
'''
def create_param(self, param_name, init_net, shape):
if isinstance(param_name, BlobReference):
param = BlobReference(str(param_name), init_net)
elif isinstance(param_name, str):
param = ScopedBlobReference(param_name, init_net)
else:
raise TypeError("Unsupported type for param_name")
# TODO(amalevich): Add operator that will check param in the workspace
return ParameterInfo(
param_id=None,
param=param,
shape=shape,
)
class PseudoFP16Initializer(Initializer):
'''
Used in cases when the parameter should be used at half (16-bit) precision
for compute purposes (i.e. on the forward and backward pass) but
needs to be stored and optimized at single (32-bit) precision so tiny
gradients with small learning rates don't underflow FP16 precision.
A 32-bit copy of the 16-bit blob is stored in the ParameterInfo.
This is helpful for mixed-precision training, see
https://arxiv.org/abs/1710.03740 for details.
'''
def update(self, operator_name, kwargs):
if self.operator_name is not None:
raise Exception("Operator name overwrites are not allowed")
self.operator_name = operator_name
self.operator_kwargs = kwargs
def create_param(self, param_name, init_net, shape):
# create master fp32 copy
param_fp32 = init_net.__getattr__(self.operator_name)(
[], param_name + "_fp32", shape=shape,
**self.operator_kwargs)
# cast to fp16 copy
param = init_net.FloatToHalf(
param_fp32, param_name)
return ParameterInfo(
param_id=None,
param=param,
shape=shape,
blob_copy={DataType.FLOAT: param_fp32}
)
class ReversePseudoFP16Initializer(Initializer):
'''
Like PseudoFP16Initializer above, except the primary blob is taken to
be the 32-bit precision parameter, and the 16-bit version of the blob
is stored in blob_copy instead.
'''
def update(self, operator_name, kwargs):
if self.operator_name is not None:
raise Exception("Operator name overwrites are not allowed")
self.operator_name = operator_name
self.operator_kwargs = kwargs
def create_param(self, param_name, init_net, shape):
# create master fp32 copy
param_fp32 = init_net.__getattr__(self.operator_name)(
[], param_name, shape=shape,
**self.operator_kwargs)
# cast to fp16 copy
param_fp16 = init_net.FloatToHalf(
param_fp32, param_name + "_fp16")
return ParameterInfo(
param_id=None,
param=param_fp32,
shape=shape,
blob_copy={DataType.FLOAT16: param_fp16}
)
def update_initializer(initializer_class,
operator_name_and_kwargs,
default_operator_name_and_kwargs):
'''
A helper function to convert from operator_name_and_kwargs to new
object of type initializer_class. This function serves two purposes:
1. Support for custom initialization operators being passed in
2. Allow user to specify a custom Initializer without overwriting
default operators used for initialization
If initializer_class is None, creates a default initializer using
the Initializer class and operator_name_and_kwargs provided
If operator_name_and_kwargs is None, uses default_operator_name_and_kwargs
returns an instantiated Initializer object
'''
def get_initializer_args():
return (
operator_name_and_kwargs or
default_operator_name_and_kwargs
)
if initializer_class is not None:
init = initializer_class(get_initializer_args()[0],
**get_initializer_args()[1])
else:
init = Initializer(
get_initializer_args()[0],
**get_initializer_args()[1]
)
return init
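# Minimal usage sketch: how update_initializer falls back to the default operator
# when no custom operator or initializer class is supplied. The operator names
# below are only illustrative examples.
if __name__ == "__main__":
    default = ("ConstantFill", {})
    plain = update_initializer(None, None, default)
    print(type(plain).__name__, plain.operator_name)   # Initializer ConstantFill
    fp16 = update_initializer(PseudoFP16Initializer, ("XavierFill", {}), default)
    print(type(fp16).__name__, fp16.operator_name)     # PseudoFP16Initializer XavierFill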
|
pytorch-master
|
caffe2/python/modeling/initializers.py
|
from caffe2.python import core
from caffe2.proto import caffe2_pb2
from caffe2.python.optimizer import get_param_device
from caffe2.python.modeling.net_modifier import NetModifier
import logging
logger = logging.getLogger(__name__)
class GradientClipping(NetModifier):
L1_NORM = 'l1_norm'
L2_NORM = 'l2_norm'
BY_NORM = 'by_norm'
BY_VALUE = 'by_value'
GRAD_CLIP_METHODS = [BY_NORM, BY_VALUE]
CLIP_GRADIENT_NORM_TYPES = [L2_NORM, L1_NORM]
def __init__(self, grad_clip_method, clip_norm_type='l2_norm',
clip_threshold=0.1, use_parameter_norm=False,
compute_norm_ratio=False, clip_max=1, clip_min=-1,
blobs_to_include=None, blobs_to_exclude=None):
"""
Clips gradient to avoid gradient magnitude explosion or vanishing gradient.
Args:
grad_clip_method: ways to clip the gradients
clip_norm_type: type of norm used in the necessary computation
clip_threshold: threshold used to determine whether to clip
use_parameter_norm: a boolean to indicate whether to incorporate
the norm of the parameter
compute_norm_ratio: a boolean to compute the ratio between gradient norm
and parameter norm explicitly for debugging purpose
clip_max: when clipping by_value, any value that is greater than
clip_max will be clipped to clip_max
clip_min: when clipping by_value, any value that is smaller than
clip_min will be clipped to clip_min
blobs_to_include: names of blobs whose gradient is to be clipped. If it is set
to None, the gradients of all params in grad_map will be clipped.
blobs_to_exclude: names of blobs whose gradient is not to be clipped.
"""
assert grad_clip_method in self.GRAD_CLIP_METHODS, (
"This method of clipping, {}, has not been implemented.".format(
grad_clip_method))
if clip_norm_type is not None:
assert clip_norm_type in self.CLIP_GRADIENT_NORM_TYPES, (
"This method of clipping, {}, has not been implemented.".format(
clip_norm_type))
self.grad_clip_method = grad_clip_method
self.clip_norm_type = clip_norm_type
self.clip_threshold = float(clip_threshold)
self.use_parameter_norm = use_parameter_norm
self.compute_norm_ratio = compute_norm_ratio
self.clip_max = float(clip_max)
self.clip_min = float(clip_min)
self.blobs_to_include = blobs_to_include
self.blobs_to_exclude = blobs_to_exclude
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
assert grad_map is not None
CPU = core.DeviceOption(caffe2_pb2.CPU)
final_param_map = {}
if self.blobs_to_include is None:
final_param_map = grad_map
else:
for blob in self.blobs_to_include:
param = core.BlobReference(blob)
if not net.BlobIsDefined(param):
raise Exception('param {0} is not defined in net {1}'.format(
param, net.Name()))
final_param_map[param] = grad_map[param]
if self.blobs_to_exclude is not None:
for blob in self.blobs_to_exclude:
final_param_map.pop(blob, None)
for param, grad in final_param_map.items():
# currently sparse gradients won't be clipped
# further implementation is needed to enable it
if isinstance(grad, core.GradientSlice):
continue
device = get_param_device(
param,
grad_map[str(param)],
param_to_device=blob_to_device,
default_device=CPU,
)
with core.DeviceScope(device):
if self.grad_clip_method == self.BY_NORM:
if self.clip_norm_type == self.L2_NORM:
p = 2
elif self.clip_norm_type == self.L1_NORM:
p = 1
grad_norm = net.LpNorm(
[grad],
net.NextScopedBlob(prefix=str(grad) + '_l{}_norm'.format(p)),
p=p,
)
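# LpNorm with p=2 yields the sum of squares, so take the square root to
# recover the true L2 norm before clipping.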
if p == 2:
grad_norm = net.Pow([grad_norm], exponent=0.5)
op_inputs = [grad, grad_norm]
if self.use_parameter_norm:
param_norm = net.LpNorm(
[param],
net.NextScopedBlob(
prefix=str(param) + '_l{}_norm'.format(p)),
p=p,
)
if p == 2:
param_norm = net.Pow([param_norm], exponent=0.5)
op_inputs.append(param_norm)
if self.compute_norm_ratio:
net.Div(
[grad_norm, param_norm],
[net.NextScopedBlob(
prefix=str(param) + "_norm_ratio")]
)
net.ClipTensorByScaling(
op_inputs,
[grad],
threshold=self.clip_threshold,
)
elif self.grad_clip_method == self.BY_VALUE:
net.Clip(
[grad],
[grad],
max=self.clip_max,
min=self.clip_min,
)
|
pytorch-master
|
caffe2/python/modeling/gradient_clipping.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.get_entry_from_blobs import GetEntryFromBlobs
import numpy as np
class GetEntryFromBlobsTest(unittest.TestCase):
def test_get_entry_from_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=10, dim_out=8)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=8, dim_out=4)
i1, i2 = np.random.randint(4, size=2)
net_modifier = GetEntryFromBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
i1=i1,
i2=i2,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 10).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_entry = workspace.FetchBlob('fc1_w_{0}_{1}'.format(i1, i2))
self.assertEqual(fc1_w_entry.size, 1)
self.assertEqual(fc1_w_entry[0], fc1_w[i1][i2])
assert model.net.output_record() is None
def test_get_entry_from_blobs_modify_output_record(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=4)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=4, dim_out=4)
i1, i2 = np.random.randint(4), np.random.randint(5) - 1
net_modifier = GetEntryFromBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
i1=i1,
i2=i2,
)
net_modifier(model.net, modify_output_record=True)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
if i2 < 0:
fc1_w_entry = workspace.FetchBlob('fc1_w_{0}_all'.format(i1))
else:
fc1_w_entry = workspace.FetchBlob('fc1_w_{0}_{1}'.format(i1, i2))
if i2 < 0:
self.assertEqual(fc1_w_entry.size, 4)
for j in range(4):
self.assertEqual(fc1_w_entry[0][j], fc1_w[i1][j])
else:
self.assertEqual(fc1_w_entry.size, 1)
self.assertEqual(fc1_w_entry[0], fc1_w[i1][i2])
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
|
pytorch-master
|
caffe2/python/modeling/get_entry_from_blobs_test.py
|
import abc
class NetModifier(metaclass=abc.ABCMeta):
"""
An abstraction class for supporting modifying a generated net.
Inherited classes should implement the modify_net method where
related operators are added to the net.
Example usage:
modifier = SomeNetModifier(opts)
modifier(net)
"""
def __init__(self):
pass
@abc.abstractmethod
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None, modify_output_record=False):
pass
def __call__(self, net, init_net=None, grad_map=None, blob_to_device=None,
modify_output_record=False):
self.modify_net(
net,
init_net=init_net,
grad_map=grad_map,
blob_to_device=blob_to_device,
modify_output_record=modify_output_record)
|
pytorch-master
|
caffe2/python/modeling/net_modifier.py
|
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_statistics_for_blobs import (
ComputeStatisticsForBlobs
)
import numpy as np
class ComputeStatisticsForBlobsTest(unittest.TestCase):
def test_compute_statistics_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeStatisticsForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_summary = workspace.FetchBlob('fc1_w_summary')
# std is unbiased here
stats_ref = np.array([fc1_w.flatten().min(), fc1_w.flatten().max(),
fc1_w.flatten().mean(), fc1_w.flatten().std(ddof=1)])
self.assertAlmostEqual(np.linalg.norm(stats_ref - fc1_w_summary), 0,
delta=1e-5)
self.assertEqual(fc1_w_summary.size, 4)
assert model.net.output_record() is None
def test_compute_statistics_for_blobs_modify_output_record(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeStatisticsForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
)
net_modifier(model.net, modify_output_record=True)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_summary = workspace.FetchBlob('fc1_w_summary')
# std is unbiased here
stats_ref = np.array([fc1_w.flatten().min(), fc1_w.flatten().max(),
fc1_w.flatten().mean(), fc1_w.flatten().std(ddof=1)])
self.assertAlmostEqual(np.linalg.norm(stats_ref - fc1_w_summary), 0,
delta=1e-5)
self.assertEqual(fc1_w_summary.size, 4)
self.assertEqual(len(model.net.Proto().op), 8)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs()
|
pytorch-master
|
caffe2/python/modeling/compute_statistics_for_blobs_test.py
|
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_histogram_for_blobs import (
ComputeHistogramForBlobs
)
import numpy as np
class ComputeHistogramForBlobsTest(unittest.TestCase):
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
assert X.ndim == 2, ('this test assume 2d array, but X.ndim is {0}'.
format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist = hist.astype(np.float32) / (N * M)
acc_hist = cur_hist
return [cur_hist, acc_hist]
def test_compute_histogram_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
num_buckets = 20
lower_bound = 0.2
upper_bound = 0.8
accumulate = False
net_modifier = ComputeHistogramForBlobs(blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
num_buckets=num_buckets,
lower_bound=lower_bound,
upper_bound=upper_bound,
accumulate=accumulate)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_curr_normalized_hist = workspace.FetchBlob('fc1_w_curr_normalized_hist')
cur_hist, acc_hist = self.histogram(fc1_w,
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
self.assertEqual(fc1_w_curr_normalized_hist.size, num_buckets + 2)
self.assertAlmostEqual(np.linalg.norm(
fc1_w_curr_normalized_hist - cur_hist), 0.0, delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 12)
assert model.net.output_record() is None
def test_compute_histogram_for_blobs_modify_output_record(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
num_buckets = 20
lower_bound = 0.2
upper_bound = 0.8
accumulate = False
net_modifier = ComputeHistogramForBlobs(blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
num_buckets=num_buckets,
lower_bound=lower_bound,
upper_bound=upper_bound,
accumulate=accumulate)
net_modifier(model.net, modify_output_record=True)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_curr_normalized_hist = workspace.FetchBlob('fc1_w_curr_normalized_hist')
cur_hist, acc_hist = self.histogram(fc1_w,
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
self.assertEqual(fc1_w_curr_normalized_hist.size, num_buckets + 2)
self.assertAlmostEqual(np.linalg.norm(
fc1_w_curr_normalized_hist - cur_hist), 0.0, delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 12)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
|
pytorch-master
|
caffe2/python/modeling/compute_histogram_for_blobs_test.py
|
from caffe2.python import scope
import contextlib
import logging
logger = logging.getLogger(__name__)
class ParameterSharingContext(object):
"""
This class manages scope driven way of parameter sharing across different
NameScopes.
"""
def __init__(self):
self._scope_overrides = {}
self._contexts = []
def _resolve_scope_overrides(self, candidate_scope):
"""
Recursively resolves all scope overrides, i.e. multiple steps of
override can be used.
For example, if one provides the following scope overrides:
{'scope_b': 'scope_a'} and within 'scope_b' - {'shared_child': ''},
then name 'w' will get resolved to the following blobs depending on the
namescope:
a. 'scope_a' -> 'scope_a/w'
b. 'scope_b' -> 'scope_a/w'
c. 'scope_c' -> 'scope_c/w'
d. 'scope_b/shared_child' -> 'scope_a/w'
e. 'scope_b/unshared_child' -> 'scope_a/unshared_child/w'
"""
best_scope = candidate_scope
best_scope_idx = 0
sub_scopes = candidate_scope.split(scope._NAMESCOPE_SEPARATOR)
cur_scope = ''
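# Walk the scope prefixes from the outermost namescope inward; the override
# registered for the longest matching prefix wins, and the result is then
# resolved recursively in case it is itself overridden.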
for idx, sub_scope in enumerate(sub_scopes):
cur_scope = cur_scope + sub_scope + scope._NAMESCOPE_SEPARATOR
if cur_scope in self._scope_overrides:
best_scope = self._scope_overrides[cur_scope]
best_scope_idx = idx
if best_scope == candidate_scope:
return candidate_scope
else:
return (self._resolve_scope_overrides(best_scope) +
scope._NAMESCOPE_SEPARATOR.join(
sub_scopes[best_scope_idx + 1:]))
def get_parameter_name(self, name):
candidate_scope = scope.CurrentNameScope()
best_scope = self._resolve_scope_overrides(candidate_scope)
if best_scope != candidate_scope:
logger.info("Overwriting scope {0} with scope {1}".format(
candidate_scope, best_scope))
return best_scope + name
def add_scope_overrides(self, shared_scopes):
self._contexts.append(shared_scopes)
self._scope_overrides.update(shared_scopes)
def pop(self):
assert len(self._contexts) > 0
self._contexts.pop()
self._scope_overrides = {}
for x in self._contexts:
self._scope_overrides.update(x)
parameter_sharing_context = ParameterSharingContext()
def _normalize_namescope(namescope):
if namescope and namescope[-1] != scope._NAMESCOPE_SEPARATOR:
return namescope + scope._NAMESCOPE_SEPARATOR
else:
return namescope
@contextlib.contextmanager
def ParameterSharing(shared_scopes):
"""
Helper function for sharing scopes.
All the parameters within the shared_scopes will be remapped with respect
to CurrentNameScope().
I.e. if one calls ParameterSharing with {'scope_b': 'scope_a'} from the
scope 'some_global_scope', it effectively means that all parameters from
'some_global_scope/scope_b' will be shared with the parameters from
'some_global_scope/scope_a'
"""
assert isinstance(shared_scopes, dict)
shared_scope_overrides = {}
current_scope = scope.CurrentNameScope()
for k, v in shared_scopes.items():
assert not v.startswith(k), (
"Illegal override for parameter sharing. {} is prefix of {}".
format(k, v))
k = current_scope + k
v = current_scope + v
# Normalize all the scopes, so scope_a and scope_a/ are equivalent
k = _normalize_namescope(k)
v = _normalize_namescope(v)
shared_scope_overrides[k] = v
try:
parameter_sharing_context.add_scope_overrides(shared_scope_overrides)
yield
finally:
parameter_sharing_context.pop()
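# Small usage sketch, mirroring parameter_sharing_test: inside 'model_b' the
# override maps parameter names onto 'model_a'.
if __name__ == "__main__":
    with scope.NameScope('global_scope'):
        with ParameterSharing({'model_b': 'model_a'}):
            with scope.NameScope('model_b'):
                # prints 'global_scope/model_a/w'
                print(parameter_sharing_context.get_parameter_name('w'))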
|
pytorch-master
|
caffe2/python/modeling/parameter_sharing.py
|
from caffe2.python import core
import numpy as np
class ParameterTags(object):
BIAS = 'BIAS'
WEIGHT = 'WEIGHT'
COMPUTED_PARAM = 'COMPUTED_PARAM'
class ParameterInfo(object):
def __init__(
self, param_id, param, key=None, shape=None, length=None,
grad=None, blob_copy=None):
assert isinstance(param, core.BlobReference)
self.param_id = param_id
self.name = str(param)
self.blob = param
self.key = key
self.shape = shape
self.size = None if shape is None else np.prod(shape)
self.length = max(1, length if length is not None else 1)
self.grad = grad
self._cloned_init_net = None
# Optionally store equivalent copies of the blob
# in different precisions (i.e. half and float copies)
# stored as a dict of TensorProto.DataType -> BlobReference
self.blob_copy = blob_copy
# each param_info can have its own optimizer. It can be set within
# OptimizerContext (caffe2/python/optimizer.py)
self._optimizer = None
@property
def parameter(self):
return self.blob
@property
def optimizer(self):
return self._optimizer
@optimizer.setter
def optimizer(self, value):
assert self._optimizer is None, "optimizer has already been set"
self._optimizer = value
def __str__(self):
return self.name
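# Tiny usage sketch: build a ParameterInfo directly from an arbitrary blob name
# (the name and shape below are made up for illustration).
if __name__ == "__main__":
    blob = core.BlobReference("fc1_w")
    info = ParameterInfo(param_id=0, param=blob, shape=(4, 2))
    print(info.name, info.shape, info.size)  # fc1_w (4, 2) 8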
|
pytorch-master
|
caffe2/python/modeling/parameter_info.py
|
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_norm_for_blobs import ComputeNormForBlobs
import numpy as np
class ComputeNormForBlobsTest(unittest.TestCase):
def test_compute_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_l2_norm = workspace.FetchBlob('fc1_w_l2_norm')
self.assertEqual(fc1_w_l2_norm.size, 1)
self.assertAlmostEqual(fc1_w_l2_norm[0],
np.linalg.norm(fc1_w)**2,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 10)
assert model.net.output_record() is None
def test_compute_norm_for_blobs_modify_output_record(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
)
net_modifier(model.net, modify_output_record=True)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_l2_norm = workspace.FetchBlob('fc1_w_l2_norm')
self.assertEqual(fc1_w_l2_norm.size, 1)
self.assertAlmostEqual(fc1_w_l2_norm[0],
np.linalg.norm(fc1_w)**2,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 10)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
def test_compute_averaged_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
compute_averaged_norm=True,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_averaged_l2_norm = workspace.FetchBlob('fc1_w_averaged_l2_norm')
self.assertEqual(fc1_w_averaged_l2_norm.size, 1)
self.assertAlmostEqual(fc1_w_averaged_l2_norm[0],
np.linalg.norm(fc1_w)**2 / fc1_w.size,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 10)
def test_compute_norm_for_blobs_no_print(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=-1,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_l2_norm = workspace.FetchBlob('fc1_w_l2_norm')
self.assertEqual(fc1_w_l2_norm.size, 1)
self.assertAlmostEqual(fc1_w_l2_norm[0],
np.linalg.norm(fc1_w)**2,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 8)
def test_compute_l1_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
p=1,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_l1_norm = workspace.FetchBlob('fc1_w_l1_norm')
self.assertEqual(fc1_w_l1_norm.size, 1)
self.assertAlmostEqual(fc1_w_l1_norm[0],
np.sum(np.abs(fc1_w)),
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 10)
def test_compute_l1_averaged_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
p=1,
compute_averaged_norm=True,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_averaged_l1_norm = workspace.FetchBlob('fc1_w_averaged_l1_norm')
self.assertEqual(fc1_w_averaged_l1_norm.size, 1)
self.assertAlmostEqual(fc1_w_averaged_l1_norm[0],
np.sum(np.abs(fc1_w)) / fc1_w.size,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 10)
def test_compute_norm_row_index_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w'],
logging_frequency=10,
compute_averaged_norm=True,
row_index=1
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_row_1_averaged_l2_norm = workspace.FetchBlob('fc1_w_row_1_averaged_l2_norm')
self.assertEqual(fc1_w_row_1_averaged_l2_norm.size, 1)
self.assertAlmostEqual(fc1_w_row_1_averaged_l2_norm[0],
np.linalg.norm(fc1_w[1])**2 / fc1_w[1].size,
delta=1e-5)
|
pytorch-master
|
caffe2/python/modeling/compute_norm_for_blobs_test.py
|
import copy
from caffe2.proto import caffe2_pb2
from caffe2.python import core
def rewrite_init_net_simple(net):
for op in net.op:
op.device_option.device_type = caffe2_pb2.IDEEP
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
def fix_BoxWithNMSLimit(net):
outputs = set()
for op in net.op:
if op.type == 'BoxWithNMSLimit':
outputs.add(op.output[0])
outputs.add(op.output[1])
outputs.add(op.output[2])
for op in net.op:
if op.type == 'CopyIDEEPToCPU':
if op.input[0] in outputs:
print("Chaning CopyIDEEPToCPU to Copy for {}".format(op.input[0]))
op.type = 'Copy'
op.device_option.device_type = caffe2_pb2.CPU
def rewrite_run_net_simple(net):
# Simple rewrite for now - assume the entire graph can be executed
# with MKL, so just insert copy ops for external_input[0] and for
# each external output.
def mkl_tmp(name):
return "{}__MKL__".format(name)
input_blob = net.external_input[0]
if input_blob != net.op[0].input[0]:
raise Exception(
"Input blob: {} is not consumed by first op: {}".format(
input_blob, net.op[0]))
# Modify input/outputs to point to copied MKL blobs.
from_cpu = "CopyCPUToIDEEP"
to_cpu = "CopyIDEEPToCPU"
copy_input_op = core.CreateOperator(
from_cpu, input_blob, mkl_tmp(input_blob))
net.op[0].input[0] = mkl_tmp(input_blob)
copy_output_ops = [
core.CreateOperator(to_cpu, mkl_tmp(output_blob), output_blob)
for output_blob in net.external_output]
for output_blob in net.external_output:
last_producer_idx = last_producer(net.op, output_blob)
renamed_outputs = [blob if blob != output_blob else mkl_tmp(blob)
for blob in net.op[last_producer_idx].output]
net.op[last_producer_idx].output[:] = renamed_outputs
# Rename any subsequent consumers of an output blob.
for op in net.op[last_producer_idx + 1:]:
renamed_input = [blob if blob != output_blob else mkl_tmp(blob)
for blob in op.input]
op.input[:] = renamed_input
ops = [copy_input_op] + net.op[:] + copy_output_ops
del net.op[:]
net.op.extend(ops)
device = caffe2_pb2.IDEEP
for op in net.op:
op.device_option.MergeFrom(
core.DeviceOption(device_type=device))
op.engine = ""
# Temporarily disable conv+relu fusion until we verify further
# net.ParseFromString(
# C.transform_optimizeForMKLDNN(net.SerializeToString()))
fix_BoxWithNMSLimit(net)
def rewrite_run_net_simple_xrayocr_lstm(net):
# For the xrayocr model with LSTM, only rewrite the non-LSTM part of the net
# to enable MKL, then copy the temporary output blob at the break point
# and all external inputs of the LSTM part to CPU, and execute the rest of
# the net (two LSTMs) on CPU.
# This only works for the xrayocr LSTM model, which uses the first 'Shape' op
# to decide the break point; after the two LSTMs the results are
# external_output directly, so there is no need to copy back to IDEEP/MKL.
def mkl_tmp(name):
return "{}__MKL__".format(name)
def cpu_tmp(name):
return "{}__CPU__".format(name)
input_blob = net.external_input[0]
if input_blob != net.op[0].input[0]:
raise Exception(
"Input blob: {} is not consumed by first op: {}".format(
input_blob, net.op[0]))
# Modify input/outputs to point to copied MKL blobs.
from_cpu = "CopyCPUToIDEEP"
to_cpu = "CopyIDEEPToCPU"
copy_input_op = core.CreateOperator(
from_cpu, input_blob, mkl_tmp(input_blob))
net.op[0].input[0] = mkl_tmp(input_blob)
# the net may contain some external_inputs falsely added during the
# ONNX->Caffe2 conversion. This should be taken care of in earlier steps of
# pytorch_to_caffe2, but if not it can cause issues in follow-up steps,
# so check here to confirm.
for input_blob in net.external_input:
for op in net.op:
# make sure the external_input blob is not produced as an output by any op in the net
assert input_blob not in op.output
external_output = None
external_inputs_to_cpu = set()
find_first_shape_op = False
cpu_op_start_idx = -1
for op_idx, op in enumerate(net.op):
# the first Shape op marks the starting point of the LSTM chunk of the net
if not find_first_shape_op:
if op.type == 'Shape':
external_output = op.input
find_first_shape_op = True
cpu_op_start_idx = op_idx
else:
# any external input in the LSTM part needs to be copied to CPU
for in_blob in op.input:
if in_blob in net.external_input:
external_inputs_to_cpu.add(in_blob)
# make sure we found the expected break point of the net
assert external_output is not None
# create ops to copy the external input blobs used in the LSTM part from IDEEP to CPU
copy_extra_input_ops = []
for in_blob in external_inputs_to_cpu:
copy_extra_input_ops.append(core.CreateOperator(to_cpu, in_blob,
cpu_tmp(in_blob)))
# rename input blobs in LSTM part to use the CPU copy
for op in net.op[cpu_op_start_idx:]:
renamed_input = [blob if blob != in_blob else cpu_tmp(in_blob)
for blob in op.input]
op.input[:] = renamed_input
copy_output_ops = [
core.CreateOperator(to_cpu, mkl_tmp(output_blob), output_blob)
for output_blob in external_output]
for output_blob in external_output:
last_producer_idx = last_producer(net.op, output_blob)
renamed_outputs = [blob if blob != output_blob else mkl_tmp(blob)
for blob in net.op[last_producer_idx].output]
net.op[last_producer_idx].output[:] = renamed_outputs
# rearrange all ops in the correct order
ops = [copy_input_op] + net.op[:cpu_op_start_idx] \
+ copy_output_ops + copy_extra_input_ops + net.op[cpu_op_start_idx:]
del net.op[:]
net.op.extend(ops)
device = caffe2_pb2.IDEEP
for op in net.op:
# the first Shape op marks the starting point of the LSTM chunk of the net
if op.type == 'Shape':
# all LSTM ops should run on CPU
device = caffe2_pb2.CPU
op.device_option.MergeFrom(
core.DeviceOption(device_type=device))
op.engine = ""
# RecurrentNetwork has a nested step_net that needs special treatment
if op.type == 'RecurrentNetwork':
for arg in op.arg:
if arg.name == 'step_net':
for nested_op in arg.n.op:
# set device to CPU
nested_op.device_option.MergeFrom(
core.DeviceOption(device_type=device))
nested_op.engine = ""
# rename inputs in op of nested net
renamed_input = []
for blob in nested_op.input:
renamed_input.append(blob
if blob not in external_inputs_to_cpu
else cpu_tmp(blob))
nested_op.input[:] = renamed_input
# rename external inputs of nested net
new_external_input = []
for blob in arg.n.external_input:
new_external_input.append(blob
if blob not in external_inputs_to_cpu
else cpu_tmp(blob))
arg.n.external_input[:] = new_external_input
# Temporarily disable conv+relu fusion until we verify further
# net.ParseFromString(
# C.transform_optimizeForMKLDNN(net.SerializeToString()))
fix_BoxWithNMSLimit(net)
def rewrite_model_helper_simple(model):
model = copy.deepcopy(model)
# All parameter initialization should run on MKL
rewrite_init_net_simple(model.param_init_net.Proto())
rewrite_run_net_simple(model.net.Proto())
return model
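# A minimal usage sketch (an illustrative addition, not part of the original
# file), assuming a Caffe2 build with IDEEP/MKL-DNN support:
if __name__ == '__main__':
    import numpy as np
    from caffe2.python import brew, model_helper, workspace

    cpu_model = model_helper.ModelHelper(name='rewrite_example')
    fc = brew.fc(cpu_model, 'data', 'fc', dim_in=10, dim_out=10)
    cpu_model.net.AddExternalOutput(fc)

    # Returns a deep copy whose init and run nets are rewritten for IDEEP.
    ideep_model = rewrite_model_helper_simple(cpu_model)

    workspace.FeedBlob('data', np.random.randn(1, 10).astype(np.float32))
    workspace.RunNetOnce(ideep_model.param_init_net)
    workspace.RunNetOnce(ideep_model.net)
    # 'fc' is copied back to CPU by the inserted CopyIDEEPToCPU op.
    print(workspace.FetchBlob('fc'))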
|
pytorch-master
|
caffe2/python/mkl/rewrite_graph.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(
not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn."
)
class MKLConcatTest(hu.HypothesisTestCase):
@given(
batch_size=st.integers(1, 10),
channel_splits=st.lists(st.integers(1, 10), min_size=1, max_size=3),
height=st.integers(1, 10),
width=st.integers(1, 10),
**mu.gcs
)
def test_mkl_concat(
self, batch_size, channel_splits, height, width, gc, dc
):
Xs = [
np.random.rand(batch_size, channel,
height, width).astype(np.float32)
for channel in channel_splits
]
op = core.CreateOperator(
"Concat",
["X_{}".format(i) for i in range(len(Xs))],
["concat_result", "split_info"],
order="NCHW",
)
self.assertDeviceChecks(dc, op, Xs, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/mkl/mkl_concat_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLReluTest(hu.HypothesisTestCase):
@given(size=st.integers(8, 20),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_relu(self, size, input_channels, batch_size, inplace, gc, dc):
op = core.CreateOperator(
"Relu",
["X"],
["Y"] if not inplace else ["X"],
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/mkl/mkl_relu_op_test.py
|
import unittest
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
from caffe2.python import workspace, brew
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl.rewrite_graph as rewrite_graph
def deterministic_io(model):
model = copy.deepcopy(model)
for i, op in enumerate(model.InitProto().op):
op.device_option.random_seed = i + 1
if not model.Proto().external_output:
model.Proto().external_output.extend([model.Proto().op[-1].output[0]])
return model
def simple_fc():
model = ModelHelper(name="r")
brew.fc(model, "data", "fc", 10, 10)
return model, [(1, 10)]
def double_matmul():
model = ModelHelper(name="r")
fc0 = brew.fc(model, "data", "fc0", 10, 10)
fc1 = brew.fc(model, fc0, "fc1", 10, 10)
model.Proto().external_output[:] = [str(fc0), str(fc1)]
return model, [(1, 10)]
def simple_relu():
model = ModelHelper(name="r")
brew.relu(model, "data", "fc")
return model, [(1, 10)]
def simple_mlp():
model = ModelHelper(name="r")
brew.relu(
model,
brew.fc(
model,
brew.relu(
model,
brew.fc(
model,
"data",
"fc1",
10,
10),
"rl1"),
"fc2",
10,
10),
"rl2")
return model, [(1, 10)]
def simple_cnn():
model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
brew.conv(
model, "data", 'conv1', 3, 16, kernel=3, stride=1
)
brew.spatial_bn(
model, 'conv1', 'conv1_spatbn', 16, epsilon=1e-3
)
brew.relu(model, 'conv1_spatbn', 'relu1')
return model, [(1, 3, 32, 32)]
def alexnet():
model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
conv1 = brew.conv(
model,
"data",
"conv1",
3,
64,
11, ('XavierFill', {}), ('ConstantFill', {}),
stride=4,
pad=2
)
relu1 = brew.relu(model, conv1, "conv1")
pool1 = brew.max_pool(model, relu1, "pool1", kernel=3, stride=2, pad=0,
legacy_pad=3)
lrn1 = brew.lrn(
model, pool1, "pool1_lrn", size=5, alpha=1.0e-4, beta=0.75, bias=1.0)
conv2 = brew.conv(
model,
lrn1,
"conv2",
64,
192,
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
relu2 = brew.relu(model, conv2, "conv2")
pool2 = brew.max_pool(model, relu2, "pool2", kernel=3, stride=2)
lrn2 = brew.lrn(
model, pool2, "pool2_lrn", size=5, alpha=1.0e-4, beta=0.75, bias=1.0)
conv3 = brew.conv(
model,
lrn2,
"conv3",
192,
384,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = brew.relu(model, conv3, "conv3")
conv4 = brew.conv(
model,
relu3,
"conv4",
384,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = brew.relu(model, conv4, "conv4")
conv5 = brew.conv(
model,
relu4,
"conv5",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = brew.relu(model, conv5, "conv5")
pool5 = brew.max_pool(model, relu5, "pool5", kernel=3, stride=2)
fc6 = brew.fc(
model,
pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = brew.relu(model, fc6, "fc6")
fc7 = brew.fc(
model, relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = brew.relu(model, fc7, "fc7")
drop7 = brew.dropout(model, relu7, "fc7_dropout", is_test=1, ratio=0.5)
fc8 = brew.fc(
model, drop7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
relu8 = brew.relu(model, fc8, "fc8")
brew.dropout(model, relu8, "fc8_dropout", is_test=1, ratio=0.5)
return model, [(1, 3, 224, 224)]
def simple_resnet():
model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
resnet.create_resnet_32x32(
model, "data", num_input_channels=1, num_groups=1, num_labels=5,
is_test=True)
return model, [(1, 1, 32, 32)]
def complex_resnet():
model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
resnet.create_resnet50(
model, "data", num_input_channels=1, num_labels=5, is_test=True,
no_loss=True)
return model, [(1, 1, 224, 224)]
@unittest.skipIf(not workspace.C.use_mkldnn, "No MKLDNN support.")
class MKLRewriteTest(hu.HypothesisTestCase):
@given(gen=st.sampled_from([simple_relu, simple_fc,
simple_mlp, simple_cnn]))
def test_mkl_simple_rewrite(self, gen):
cpu_model, (shape,) = gen()
cpu_model = deterministic_io(cpu_model)
mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
X = np.random.randn(*shape).astype(np.float32)
def run(model):
self.ws.run(model.InitProto())
self.ws.create_blob(model.Proto().external_input[0]).feed(X)
self.ws.run(model.Proto())
return self.ws.blobs[model.Proto().external_output[0]].fetch()
np.testing.assert_allclose(run(cpu_model), run(mkl_model),
atol=1e-4, rtol=1e-4)
def test_mkl_resnet_rewrite(self):
cpu_model, (shape,) = complex_resnet()
cpu_model = deterministic_io(cpu_model)
mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
np.random.seed(1701)
X = np.random.randn(*shape).astype(np.float32)
def run(model):
self.ws.run(model.InitProto())
self.ws.create_blob(model.Proto().external_input[0]).feed(X)
self.ws.run(model.Proto())
return self.ws.blobs[model.Proto().external_output[0]].fetch()
np.testing.assert_allclose(run(cpu_model), run(mkl_model),
atol=1e-4, rtol=1e-4)
def test_mkl_multi_output_rewrite(self):
cpu_model, shapes = double_matmul()
cpu_model = deterministic_io(cpu_model)
mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
np.random.seed(1701)
Xs = [np.random.randn(*shape).astype(np.float32) for shape in shapes]
def run(model):
self.ws.run(model.InitProto())
for (name, X) in zip(model.Proto().external_input, Xs):
self.ws.create_blob(name).feed(X)
print(model.Proto())
self.ws.run(model.Proto())
return [self.ws.blobs[name].fetch()
for name in model.Proto().external_output]
run(mkl_model)
np.testing.assert_allclose(run(cpu_model), run(mkl_model),
atol=1e-4, rtol=1e-4)
def test_mkl_alexnet_rewrite(self):
cpu_model, (shape,) = alexnet()
cpu_model = deterministic_io(cpu_model)
mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
np.random.seed(1701)
X = np.random.randn(*shape).astype(np.float32)
def run(model):
self.ws.run(model.InitProto())
self.ws.create_blob(model.Proto().external_input[0]).feed(X)
self.ws.run(model.Proto())
return self.ws.blobs[model.Proto().external_output[0]].fetch()
np.testing.assert_allclose(run(cpu_model), run(mkl_model),
atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/mkl/rewrite_graph_test.py
|
pytorch-master
|
caffe2/python/mkl/__init__.py
|
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(1, 16),
output_channels=st.integers(1, 16),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
group=st.integers(1, 8),
**mu.gcs)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
group=group
)
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
inputs = [X, w, b] if use_bias else [X, w]
self.assertDeviceChecks(dc, op, inputs, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/python/mkl/mkl_conv_op_test.py
|
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLElementwiseAddTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_elementwise_add(self,
size,
input_channels,
batch_size,
inplace,
gc,
dc):
op = core.CreateOperator(
"Add",
["X0", "X1"],
["X0" if inplace else "Y"],
)
Xs = [np.random.rand(batch_size, input_channels, size, size).astype(
np.float32) for _ in range(2)]
self.assertDeviceChecks(dc, op, Xs, [0])
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/python/mkl/mkl_elementwise_add_op_test.py
|