# Dataset rows: python_code | repo_name | file_path
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLElementwiseSumTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inputs=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_elementwise_sum(self,
size,
input_channels,
batch_size,
inputs,
inplace,
gc,
dc):
op = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
)
Xs = [np.random.rand(batch_size, input_channels, size, size).astype(
np.float32) for _ in range(inputs)]
self.assertDeviceChecks(dc, op, Xs, [0])
if __name__ == "__main__":
import unittest
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_elementwise_sum_op_test.py
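The **mu.gcs expansion in the test above supplies the hypothesis-generated gc (device option) and dc (device-check list) arguments. A minimal sketch of what such a strategy bundle can look like, assuming it follows the same pattern as hypothesis_test_util.gcs with an extra MKLDNN device (the names below are illustrative, not the actual contents of mkl_test_util):

import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2

# Hypothetical reconstruction of the gcs pattern: a plain CPU device option
# plus an MKLDNN one; gc picks the device used to generate reference data,
# dc is the full list that assertDeviceChecks compares across.
_device_options = [
    caffe2_pb2.DeviceOption(),
    caffe2_pb2.DeviceOption(device_type=caffe2_pb2.MKLDNN),
]
gcs_sketch = dict(
    gc=st.sampled_from(_device_options),
    dc=st.just(_device_options),
)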
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testReLUSpeed(self):
X = np.random.randn(128, 4096).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run relu.
net.Relu("X", "Y")
net.Relu("X_mkl", "Y_mkl", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-10,
rtol=1e-10)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
# The returned runtime is the time of
# [whole_net, cpu_op, mkl_op]
# so we will assume that the MKL one runs faster than the CPU one.
# Note(Yangqing): in fact, it seems that in optimized mode, this is
# not always guaranteed - MKL runs slower than the Eigen vectorized
# version, so I am turning this assertion off.
#self.assertTrue(runtime[1] >= runtime[2])
print("Relu CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 27, 27).astype(np.float32) - 0.5
W = np.random.rand(192, 256, 3, 3).astype(np.float32) - 0.5
b = np.random.rand(192).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run conv.
net.Conv(["X", "W", "b"], "Y", pad=1, stride=1, kernel=3)
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "Y_mkl",
pad=1, stride=1, kernel=3, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("Conv CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
if __name__ == '__main__':
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_speed_test.py
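As the comment in testReLUSpeed above notes, workspace.BenchmarkNet(name, warmup_runs, main_runs, True) returns per-run times as [whole_net, op_0, op_1, ...]. A small hedged helper (a sketch, not part of the original test) for labelling those times with the operator types in the net:

from caffe2.python import workspace

def report_benchmark(net, warmup_runs=1, main_runs=100):
    # Assumes the [whole_net, per-op...] layout described above;
    # values are milliseconds per run.
    runtime = workspace.BenchmarkNet(net.Proto().name, warmup_runs, main_runs, True)
    print("whole net: {} ms/run".format(runtime[0]))
    for op, ms in zip(net.Proto().op, runtime[1:]):
        print("  {}: {} ms/run".format(op.type, ms))
    return runtime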
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testSpatialBNTestingSpeed(self):
input_channel = 10
X = np.random.rand(1, input_channel, 100, 100).astype(np.float32) - 0.5
scale = np.random.rand(input_channel).astype(np.float32) + 0.5
bias = np.random.rand(input_channel).astype(np.float32) - 0.5
mean = np.random.randn(input_channel).astype(np.float32)
var = np.random.rand(input_channel).astype(np.float32) + 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("scale", scale)
workspace.FeedBlob("bias", bias)
workspace.FeedBlob("mean", mean)
workspace.FeedBlob("var", var)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("scale_mkl", scale, device_option=mkl_do)
workspace.FeedBlob("bias_mkl", bias, device_option=mkl_do)
workspace.FeedBlob("mean_mkl", mean, device_option=mkl_do)
workspace.FeedBlob("var_mkl", var, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run SpatialBN.
net.SpatialBN(["X", "scale", "bias","mean","var"], "Y", order="NCHW",
is_test=True,
epsilon=1e-5)
net.SpatialBN(["X_mkl", "scale_mkl", "bias_mkl","mean_mkl","var_mkl"], "Y_mkl", order="NCHW",
is_test=True,
epsilon=1e-5, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testSpatialBNTrainingSpeed(self):
input_channel = 10
X = np.random.rand(1, input_channel, 100, 100).astype(np.float32) - 0.5
scale = np.random.rand(input_channel).astype(np.float32) + 0.5
bias = np.random.rand(input_channel).astype(np.float32) - 0.5
mean = np.random.randn(input_channel).astype(np.float32)
var = np.random.rand(input_channel).astype(np.float32) + 0.5
#mean = np.zeros(input_channel)
#var = np.zeros(input_channel)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("scale", scale)
workspace.FeedBlob("bias", bias)
workspace.FeedBlob("mean", mean)
workspace.FeedBlob("var", var)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("scale_mkl", scale, device_option=mkl_do)
workspace.FeedBlob("bias_mkl", bias, device_option=mkl_do)
workspace.FeedBlob("mean_mkl", mean, device_option=mkl_do)
workspace.FeedBlob("var_mkl", var, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run SpatialBN.
net.SpatialBN(["X", "scale", "bias","mean", "var"],
["Y", "mean", "var", "saved_mean", "saved_var"],
order="NCHW",
is_test=False,
epsilon=1e-5)
net.SpatialBN(["X_mkl", "scale_mkl", "bias_mkl","mean_mkl","var_mkl"],
["Y_mkl", "mean_mkl", "var_mkl", "saved_mean_mkl", "saved_var_mkl"],
order="NCHW",
is_test=False,
epsilon=1e-5,
device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
np.testing.assert_allclose(
workspace.FetchBlob("mean"),
workspace.FetchBlob("mean_mkl"),
atol=1e-2,
rtol=1e-2)
np.testing.assert_allclose(
workspace.FetchBlob("var"),
workspace.FetchBlob("var_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
if __name__ == '__main__':
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_sbn_speed_test.py
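For reference, the test-mode SpatialBN benchmarked above computes, per channel c, Y = scale_c * (X - mean_c) / sqrt(var_c + epsilon) + bias_c. A hedged NumPy check that could be compared against the fetched Y blob (a sketch, not part of the test file):

import numpy as np

def spatialbn_test_mode_reference(X, scale, bias, mean, var, epsilon=1e-5):
    # X is NCHW; broadcast the per-channel statistics over N, H and W.
    shape = (1, X.shape[1], 1, 1)
    x_hat = (X - mean.reshape(shape)) / np.sqrt(var.reshape(shape) + epsilon)
    return scale.reshape(shape) * x_hat + bias.reshape(shape)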
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
import caffe2.proto.caffe2_pb2 as pb2
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKCopyTest(hu.HypothesisTestCase):
@given(width=st.integers(7, 9),
height=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
**mu.gcs)
def test_mkl_copy(self,
width,
height,
input_channels,
batch_size,
gc, dc):
X = np.random.rand(
batch_size, input_channels, width, height).astype(np.float32)
self.ws.create_blob("X").feed(X, pb2.DeviceOption())
self.ws.run(core.CreateOperator(
"CopyCPUToMKL",
["X"],
["X_MKL"],
device_option=pb2.DeviceOption(device_type=pb2.MKLDNN)
))
self.ws.run(core.CreateOperator(
"CopyMKLToCPU",
["X_MKL"],
["X_copy"],
device_option=pb2.DeviceOption(device_type=pb2.MKLDNN)
))
np.testing.assert_array_equal(X, self.ws.blobs["X_copy"].fetch())
@given(n=st.sampled_from([0, 10]))
def test_mkl_zero_copy(self, n):
shape = (0, n)
X = np.zeros(shape=shape).astype(np.float32)
self.ws.create_blob("X").feed(X, pb2.DeviceOption())
self.ws.run(core.CreateOperator(
"CopyCPUToMKL",
["X"],
["X_MKL"],
device_option=pb2.DeviceOption(device_type=pb2.MKLDNN)
))
self.ws.run(core.CreateOperator(
"CopyMKLToCPU",
["X_MKL"],
["X_copy"],
device_option=pb2.DeviceOption(device_type=pb2.MKLDNN)
))
np.testing.assert_equal(shape, self.ws.blobs["X_copy"].fetch().shape)
if __name__ == "__main__":
import unittest
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_copy_op_test.py
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLLRNTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
im_size=st.integers(1, 10),
order=st.sampled_from(["NCHW"]),
**mu.gcs)
def test_mkl_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
["X"],
["Y", "Y_scale"],
size=5,
alpha=0.001,
beta=0.75,
bias=2.0,
order=order,
)
X = np.random.rand(
batch_size, input_channels, im_size, im_size).astype(np.float32)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
import unittest
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_LRN_op_test.py
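The LRN operator exercised above is cross-channel local response normalization. Assuming the Caffe-style definition in which alpha is divided by the window size (an assumption about the Caffe2 CPU implementation, not taken from its source), a NumPy reference looks roughly like this:

import numpy as np

def lrn_reference_nchw(X, size=5, alpha=0.001, beta=0.75, bias=2.0):
    # Assumed formula: Y = X / (bias + (alpha / size) * sum_window(X^2)) ** beta
    N, C, H, W = X.shape
    Y = np.empty_like(X)
    half = size // 2
    for c in range(C):
        lo, hi = max(0, c - half), min(C, c + half + 1)
        sq_sum = np.sum(X[:, lo:hi] ** 2, axis=1)
        Y[:, c] = X[:, c] / (bias + (alpha / size) * sq_sum) ** beta
    return Y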
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings, assume
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLPoolTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
method=st.sampled_from(["MaxPool", "AveragePool"]),
**mu.gcs)
@settings(max_examples=2, deadline=100)
def test_mkl_pooling(self, stride, pad, kernel, size,
input_channels, batch_size,
method, gc, dc):
assume(pad < kernel)
op = core.CreateOperator(
method,
["X"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
import unittest
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_pool_op_test.py
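For the shapes generated above (with assume(pad < kernel)), both device implementations should agree on the pooled output size; under the usual floor convention it is computed as below (the standard formula, stated here as an assumption rather than taken from the operator code):

def pooled_size(size, kernel, pad, stride):
    # floor((input + 2 * pad - kernel) / stride) + 1
    return (size + 2 * pad - kernel) // stride + 1

# Example: a 9x9 input with kernel 3, pad 0, stride 2 gives a 4x4 output.
assert pooled_size(9, 3, 0, 2) == 4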
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
def test_mkl_fc(self,n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
op = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"]
)
self.assertDeviceChecks(dc, op, [X, W, b], [0])
if __name__ == "__main__":
import unittest
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_fc_op_test.py
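Caffe2's FC takes W with shape (n, k) and b with shape (n,), so for a 2-D X the expected result is X dot W transposed plus b. A quick NumPy check mirroring the shapes drawn by the test above:

import numpy as np

m, k, n = 4, 3, 2
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
Y_ref = X.dot(W.T) + b  # shape (m, n): what the FC op should produce
assert Y_ref.shape == (m, n)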
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSigmoidTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5), inplace=st.booleans(),
**mu.gcs)
def test_mkl_sigmoid(self, n, m, inplace, gc, dc):
X = np.random.rand(m, n).astype(np.float32)
op = core.CreateOperator(
"Sigmoid",
["X"],
["Y" if not inplace else "X"]
)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
import unittest
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_sigmoid_op_test.py
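The operator under test is the standard logistic sigmoid, so the CPU and MKL outputs should both match the elementwise reference below:

import numpy as np

def sigmoid_reference(X):
    # Numerically naive, which is fine for inputs drawn from [0, 1).
    return 1.0 / (1.0 + np.exp(-X))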
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 2, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run LRN.
net.LRN("X", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW")
net.LRN("X_mkl", ["Y_mkl", "Y_Scale_mkl"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("LRN CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvReluLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 3, 224, 224).astype(np.float32) - 0.5
W = np.random.rand(64, 3, 11, 11).astype(np.float32) - 0.5
b = np.random.rand(64).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=11)
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl",
pad=1, stride=1, kernel=11, device_option=mkl_do)
net.Relu("C", "R")
net.Relu("C_mkl", "R_mkl", device_option=mkl_do)
net.LRN("R", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW")
net.LRN("R_mkl", ["Y_mkl", "Y_Scale_mkl"],size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_LRN_speed_test.py
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testMaxPoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run max pooling.
net.MaxPool("X", "Y", stride=2, kernel=3)
net.MaxPool("X_mkl", "Y_mkl",
stride=2, kernel=3, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("Maxpooling CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testAveragePoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run average pooling.
net.AveragePool("X", "Y", stride=2, kernel=3)
net.AveragePool("X_mkl", "Y_mkl",
stride=2, kernel=3, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("Averagepooling CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvReluMaxPoolSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 3, 224, 224).astype(np.float32) - 0.5
W = np.random.rand(64, 3, 11, 11).astype(np.float32) - 0.5
b = np.random.rand(64).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=11)
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl",
pad=1, stride=1, kernel=11, device_option=mkl_do)
net.Relu("C", "R")
net.Relu("C_mkl", "R_mkl", device_option=mkl_do)
net.AveragePool("R", "Y", stride=2, kernel=3)
net.AveragePool("R_mkl", "Y_mkl",
stride=2, kernel=3, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_pool_speed_test.py
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSpatialBNTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
#order=st.sampled_from(["NCHW", "NHWC"]),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_spatialbn_test_mode(self, size, input_channels,
batch_size, seed, order, epsilon, gc, dc):
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y"],
order=order,
is_test=True,
epsilon=epsilon,
)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
#order=st.sampled_from(["NCHW", "NHWC"]),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "running_mean", "running_var"],
["Y", "running_mean", "running_var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
)
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
# Note: it seems that the running mean and var do not pass the device
# test, suggesting that the semantics are a bit different. Only
# checking the output and saved mean and var at this stage.
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var],
[0, 3, 4])
if __name__ == "__main__":
import unittest
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_sbn_op_test.py
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testFCSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
#X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5
W = np.random.rand(4096, 9216).astype(np.float32) - 0.5
b = np.random.rand(4096).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run FC.
net.FC(["X", "W", "b"], "Y")
net.FC(["X_mkl", "W_mkl", "b_mkl"], "Y_mkl", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvReluMaxPoolFcSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 13, 13).astype(np.float32) - 0.5
W = np.random.rand(256, 256, 3, 3).astype(np.float32) - 0.5
b = np.random.rand(256).astype(np.float32) - 0.5
w_fc = np.random.rand(4096, 9216).astype(np.float32) - 0.5
b_fc = np.random.rand(4096).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("w_fc", w_fc)
workspace.FeedBlob("b_fc", b_fc)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
workspace.FeedBlob("w_fc_mkl", w_fc, device_option=mkl_do)
workspace.FeedBlob("b_fc_mkl", b_fc, device_option=mkl_do)
net = core.Net("test")
net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=3)
net.Relu("C", "R")
net.MaxPool("R", "P", stride=2, kernel=3)
net.FC(["P","w_fc", "b_fc"], "Y")
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl",
pad=1, stride=1, kernel=3, device_option=mkl_do)
net.Relu("C_mkl", "R_mkl", device_option=mkl_do)
net.MaxPool("R_mkl", "P_mkl",
stride=2, kernel=3, device_option=mkl_do)
net.FC(["P_mkl","w_fc_mkl", "b_fc_mkl"], "Y_mkl", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_fc_speed_test.py
import unittest
import hypothesis.strategies as st
from hypothesis import given
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFillTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 4), c=st.integers(1, 4),
h=st.integers(1, 4), w=st.integers(1, 4),
filler=st.sampled_from(
["XavierFill", "ConstantFill", "GaussianFill", "MSRAFill"]
),
seed=st.integers(5, 10),
**mu.gcs_cpu_mkl)
def test_mkl_fill(self, n, c, h, w, filler, seed, gc, dc):
op = core.CreateOperator(
filler,
[],
["Y"],
shape=[n, c, h, w],
)
for d in dc:
d.random_seed = seed
self.assertDeviceChecks(dc, op, [], [0])
if __name__ == "__main__":
import unittest
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_fill_op_test.py
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(
not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn."
)
class MKLSqueezeTest(hu.HypothesisTestCase):
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
)
def test_mkl_squeeze(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
X = np.random.rand(*shape).astype(np.float32)
op = core.CreateOperator(
"Squeeze", "X", "X" if inplace else "Y", dims=squeeze_dims
)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
# pytorch-master | caffe2/python/mkl/mkl_squeeze_op_test.py
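Because the generated shape forces every dimension listed in squeeze_dims to 1, the expected output of the Squeeze op matches NumPy's own squeeze over those axes; a one-line reference (a sketch, not part of the test):

import numpy as np

def squeeze_reference(X, squeeze_dims):
    # Every axis in squeeze_dims was constructed to have extent 1 above.
    return np.squeeze(X, axis=tuple(sorted(set(squeeze_dims))))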
## @package lmdb_create_example
# Module caffe2.python.examples.lmdb_create_example
import argparse
import numpy as np
import lmdb
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, model_helper
'''
Simple example to create an lmdb database of random image data and labels.
This can be used as a skeleton to write your own data import.
It also runs a dummy model with Caffe2 that reads the data back and
validates that the checksum is the same.
'''
def create_db(output_file):
print(">>> Write database...")
LMDB_MAP_SIZE = 1 << 40 # MODIFY
env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)
checksum = 0
with env.begin(write=True) as txn:
for j in range(0, 128):
# MODIFY: add your own data reader / creator
label = j % 10
width = 64
height = 32
img_data = np.random.rand(3, width, height)
# ...
# Create TensorProtos
tensor_protos = caffe2_pb2.TensorProtos()
img_tensor = tensor_protos.protos.add()
img_tensor.dims.extend(img_data.shape)
img_tensor.data_type = 1
flatten_img = img_data.reshape(np.prod(img_data.shape))
img_tensor.float_data.extend(flatten_img)
label_tensor = tensor_protos.protos.add()
label_tensor.data_type = 2
label_tensor.int32_data.append(label)
txn.put(
'{}'.format(j).encode('ascii'),
tensor_protos.SerializeToString()
)
checksum += np.sum(img_data) * label
if (j % 16 == 0):
print("Inserted {} rows".format(j))
print("Checksum/write: {}".format(int(checksum)))
return checksum
def read_db_with_caffe2(db_file, expected_checksum):
print(">>> Read database...")
model = model_helper.ModelHelper(name="lmdbtest")
batch_size = 32
data, label = model.TensorProtosDBInput(
[], ["data", "label"], batch_size=batch_size,
db=db_file, db_type="lmdb")
checksum = 0
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
for _ in range(0, 4):
workspace.RunNet(model.net.Proto().name)
img_datas = workspace.FetchBlob("data")
labels = workspace.FetchBlob("label")
for j in range(batch_size):
checksum += np.sum(img_datas[j, :]) * labels[j]
print("Checksum/read: {}".format(int(checksum)))
assert np.abs(expected_checksum - checksum) < 0.1, \
"Read/write checksums don't match"
def main():
parser = argparse.ArgumentParser(
description="Example LMDB creation"
)
parser.add_argument("--output_file", type=str, default=None,
help="Path to write the database to",
required=True)
args = parser.parse_args()
checksum = create_db(args.output_file)
# For testing reading:
read_db_with_caffe2(args.output_file, checksum)
if __name__ == '__main__':
main()
# pytorch-master | caffe2/python/examples/lmdb_create_example.py
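Besides the Caffe2 TensorProtosDBInput path used by read_db_with_caffe2, the records written by create_db can be inspected with plain lmdb plus protobuf parsing. A hedged sketch of such a check (it assumes the value layout written above: one float image tensor followed by one int32 label tensor per key):

import lmdb
import numpy as np
from caffe2.proto import caffe2_pb2

def inspect_db(db_file, limit=3):
    env = lmdb.open(db_file, readonly=True)
    with env.begin() as txn:
        for i, (key, value) in enumerate(txn.cursor()):
            if i >= limit:
                break
            protos = caffe2_pb2.TensorProtos()
            protos.ParseFromString(value)
            img, label = protos.protos[0], protos.protos[1]
            data = np.array(img.float_data, dtype=np.float32).reshape(tuple(img.dims))
            print(key, data.shape, "label:", label.int32_data[0])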
# pytorch-master | caffe2/python/examples/__init__.py (empty file)
# pytorch-master | caffe2/python/examples/resnet50_trainer.py (file content: "imagenet_trainer.py")
# Module caffe2.python.examples.resnet50_trainer
import argparse
import logging
import numpy as np
import time
import os
from caffe2.python import core, workspace, experiment_util, data_parallel_model
from caffe2.python import dyndep, optimizer
from caffe2.python import timeout_guard, model_helper, brew
from caffe2.proto import caffe2_pb2
import caffe2.python.models.resnet as resnet
import caffe2.python.models.shufflenet as shufflenet
from caffe2.python.modeling.initializers import Initializer, PseudoFP16Initializer
import caffe2.python.predictor.predictor_exporter as pred_exp
import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.python.predictor_constants import predictor_constants
'''
Parallelized multi-GPU distributed trainer for Resne(X)t & Shufflenet.
Can be used to train on imagenet data, for example.
The default parameters can train a standard Resnet-50 (1x64d), and parameters
can be provided to train ResNe(X)t models (e.g., ResNeXt-101 32x4d).
To run the trainer in single-machine multi-gpu mode, set num_shards = 1.
To run the trainer in multi-machine multi-gpu mode with M machines,
run the same program on all machines, specifying num_shards = M, and
shard_id = a unique integer in the set [0, M-1].
For rendezvous (the trainer processes have to know about each other),
you can either use a directory path that is visible to all processes
(e.g. NFS directory), or use a Redis instance. Use the former by
passing the `file_store_path` argument. Use the latter by passing the
`redis_host` and `redis_port` arguments.
'''
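# A hedged illustration of the invocation patterns described above (paths and
# hostnames below are placeholders, not defaults):
#
#   Single machine, multiple GPUs:
#     python imagenet_trainer.py --train_data /path/to/train_lmdb --num_gpus 8
#
#   Two machines rendezvousing through a shared filesystem (run on each shard
#   with its own --shard_id):
#     python imagenet_trainer.py --train_data /path/to/train_lmdb --num_gpus 8 \
#         --num_shards 2 --shard_id 0 --file_store_path /shared/nfs/rendezvous
#
#   Two machines rendezvousing through Redis:
#     python imagenet_trainer.py --train_data /path/to/train_lmdb --num_gpus 8 \
#         --num_shards 2 --shard_id 1 --run_id run0 \
#         --redis_host redis.example.com --redis_port 6379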
logging.basicConfig()
log = logging.getLogger("Imagenet_trainer")
log.setLevel(logging.DEBUG)
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:redis_store_handler_ops')
def AddImageInput(
model,
reader,
batch_size,
img_size,
dtype,
is_test,
mean_per_channel=None,
std_per_channel=None,
):
'''
The image input operator loads image and label data from the reader and
applies transformations to the images (random cropping, mirroring, ...).
'''
data, label = brew.image_input(
model,
reader, ["data", "label"],
batch_size=batch_size,
output_type=dtype,
use_gpu_transform=True if core.IsGPUDeviceType(model._device_type) else False,
use_caffe_datum=True,
mean_per_channel=mean_per_channel,
std_per_channel=std_per_channel,
# mean_per_channel takes precedence over mean
mean=128.,
std=128.,
scale=256,
crop=img_size,
mirror=1,
is_test=is_test,
)
data = model.StopGradient(data, data)
def AddNullInput(model, reader, batch_size, img_size, dtype):
'''
The null input function uses a gaussian fill operator to emulate real image
input. A label blob is hardcoded to a single value. This is useful if you
want to test compute throughput or don't have a dataset available.
'''
suffix = "_fp16" if dtype == "float16" else ""
model.param_init_net.GaussianFill(
[],
["data" + suffix],
shape=[batch_size, 3, img_size, img_size],
)
if dtype == "float16":
model.param_init_net.FloatToHalf("data" + suffix, "data")
model.param_init_net.ConstantFill(
[],
["label"],
shape=[batch_size],
value=1,
dtype=core.DataType.INT32,
)
def SaveModel(args, train_model, epoch, use_ideep):
prefix = "[]_{}".format(train_model._device_prefix, train_model._devices[0])
predictor_export_meta = pred_exp.PredictorExportMeta(
predict_net=train_model.net.Proto(),
parameters=data_parallel_model.GetCheckpointParams(train_model),
inputs=[prefix + "/data"],
outputs=[prefix + "/softmax"],
shapes={
prefix + "/softmax": (1, args.num_labels),
prefix + "/data": (args.num_channels, args.image_size, args.image_size)
}
)
# save the train_model for the current epoch
model_path = "%s/%s_%d.mdl" % (
args.file_store_path,
args.save_model_name,
epoch,
)
# set db_type to be "minidb" instead of "log_file_db", which breaks
# the serialization in save_to_db. Need to switch back to log_file_db
# after migration
pred_exp.save_to_db(
db_type="minidb",
db_destination=model_path,
predictor_export_meta=predictor_export_meta,
use_ideep=use_ideep
)
def LoadModel(path, model, use_ideep):
'''
Load pretrained model from file
'''
log.info("Loading path: {}".format(path))
meta_net_def = pred_exp.load_from_db(path, 'minidb')
init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE))
predict_init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.PREDICT_INIT_NET_TYPE))
if use_ideep:
predict_init_net.RunAllOnIDEEP()
else:
predict_init_net.RunAllOnGPU()
if use_ideep:
init_net.RunAllOnIDEEP()
else:
init_net.RunAllOnGPU()
assert workspace.RunNetOnce(predict_init_net)
assert workspace.RunNetOnce(init_net)
# Hack: fix iteration counter which is in CUDA context after load model
itercnt = workspace.FetchBlob("optimizer_iteration")
workspace.FeedBlob(
"optimizer_iteration",
itercnt,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0)
)
def RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog,
):
'''
Run one epoch of the trainer.
TODO: add checkpointing here.
'''
# TODO: add loading from checkpoint
log.info("Starting epoch {}/{}".format(epoch, args.num_epochs))
epoch_iters = int(args.epoch_size / total_batch_size / num_shards)
test_epoch_iters = int(args.test_epoch_size / total_batch_size / num_shards)
for i in range(epoch_iters):
# This timeout is required (temporarily) since CUDA-NCCL
# operators might deadlock when synchronizing between GPUs.
timeout = args.first_iter_timeout if i == 0 else args.timeout
with timeout_guard.CompleteInTimeOrDie(timeout):
t1 = time.time()
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)"
log.info(fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt))
prefix = "{}_{}".format(
train_model._device_prefix,
train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
train_fmt = "Training loss: {}, accuracy: {}"
log.info(train_fmt.format(loss, accuracy))
num_images = epoch * epoch_iters * total_batch_size
prefix = "{}_{}".format(train_model._device_prefix, train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
learning_rate = workspace.FetchBlob(
data_parallel_model.GetLearningRateBlobNames(train_model)[0]
)
test_accuracy = 0
test_accuracy_top5 = 0
if test_model is not None:
# Run test_epoch_iters iterations of testing
ntests = 0
for _ in range(test_epoch_iters):
workspace.RunNet(test_model.net.Proto().name)
for g in test_model._devices:
test_accuracy += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy'
))
test_accuracy_top5 += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy_top5'
))
ntests += 1
test_accuracy /= ntests
test_accuracy_top5 /= ntests
else:
test_accuracy = (-1)
test_accuracy_top5 = (-1)
explog.log(
input_count=num_images,
batch_count=(i + epoch * epoch_iters),
additional_values={
'accuracy': accuracy,
'loss': loss,
'learning_rate': learning_rate,
'epoch': epoch,
'top1_test_accuracy': test_accuracy,
'top5_test_accuracy': test_accuracy_top5,
}
)
assert loss < 40, "Exploded gradients :("
# TODO: add checkpointing
return epoch + 1
def Train(args):
if args.model == "resnext":
model_name = "resnext" + str(args.num_layers)
elif args.model == "shufflenet":
model_name = "shufflenet"
# Either use specified device list or generate one
if args.gpus is not None:
gpus = [int(x) for x in args.gpus.split(',')]
num_gpus = len(gpus)
else:
gpus = list(range(args.num_gpus))
num_gpus = args.num_gpus
log.info("Running on GPUs: {}".format(gpus))
# Verify valid batch size
total_batch_size = args.batch_size
batch_per_device = total_batch_size // num_gpus
assert \
total_batch_size % num_gpus == 0, \
"Number of GPUs must divide batch size"
# Verify valid image mean/std per channel
if args.image_mean_per_channel:
assert \
len(args.image_mean_per_channel) == args.num_channels, \
"The number of channels of image mean doesn't match input"
if args.image_std_per_channel:
assert \
len(args.image_std_per_channel) == args.num_channels, \
"The number of channels of image std doesn't match input"
# Round down epoch size to closest multiple of batch size across machines
global_batch_size = total_batch_size * args.num_shards
epoch_iters = int(args.epoch_size / global_batch_size)
assert \
epoch_iters > 0, \
"Epoch size must be larger than batch size times shard count"
args.epoch_size = epoch_iters * global_batch_size
log.info("Using epoch size: {}".format(args.epoch_size))
# Create ModelHelper object
if args.use_ideep:
train_arg_scope = {
'use_cudnn': False,
'cudnn_exhaustive_search': False,
'training_mode': 1
}
else:
train_arg_scope = {
'order': 'NCHW',
'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': (args.cudnn_workspace_limit_mb * 1024 * 1024),
}
train_model = model_helper.ModelHelper(
name=model_name, arg_scope=train_arg_scope
)
num_shards = args.num_shards
shard_id = args.shard_id
# Expect interfaces to be comma separated.
# Use of multiple network interfaces is not yet complete,
# so simply use the first one in the list.
interfaces = args.distributed_interfaces.split(",")
# Rendezvous using MPI when run with mpirun
if os.getenv("OMPI_COMM_WORLD_SIZE") is not None:
num_shards = int(os.getenv("OMPI_COMM_WORLD_SIZE", 1))
shard_id = int(os.getenv("OMPI_COMM_WORLD_RANK", 0))
if num_shards > 1:
rendezvous = dict(
kv_handler=None,
num_shards=num_shards,
shard_id=shard_id,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
mpi_rendezvous=True,
exit_nets=None)
elif num_shards > 1:
# Create rendezvous for distributed computation
store_handler = "store_handler"
if args.redis_host is not None:
# Use Redis for rendezvous if Redis host is specified
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate", [], [store_handler],
host=args.redis_host,
port=args.redis_port,
prefix=args.run_id,
)
)
else:
# Use filesystem for rendezvous otherwise
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate", [], [store_handler],
path=args.file_store_path,
prefix=args.run_id,
)
)
rendezvous = dict(
kv_handler=store_handler,
shard_id=shard_id,
num_shards=num_shards,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
exit_nets=None)
else:
rendezvous = None
# Model building functions
def create_resnext_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = resnet.create_resnext(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
num_layers=args.num_layers,
num_groups=args.resnext_num_groups,
num_width_per_group=args.resnext_width_per_group,
no_bias=True,
no_loss=True,
)
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
return [loss]
def create_shufflenet_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = shufflenet.create_shufflenet(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
no_loss=True,
)
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy", top_k=1)
brew.accuracy(model, [softmax, "label"], "accuracy_top5", top_k=5)
return [loss]
def add_optimizer(model):
stepsz = int(30 * args.epoch_size / total_batch_size / num_shards)
if args.float16_compute:
# TODO: merge with multi-precision optimizer
opt = optimizer.build_fp16_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
weight_decay=args.weight_decay, # weight decay included
policy="step",
stepsize=stepsz,
gamma=0.1
)
else:
optimizer.add_weight_decay(model, args.weight_decay)
opt = optimizer.build_multi_precision_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
policy="step",
stepsize=stepsz,
gamma=0.1
)
return opt
# Define add_image_input function.
# Depends on the "train_data" argument.
# Note that the reader is shared between all GPUs.
if args.train_data == "null":
def add_image_input(model):
AddNullInput(
model,
None,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
)
else:
reader = train_model.CreateDB(
"reader",
db=args.train_data,
db_type=args.db_type,
num_shards=num_shards,
shard_id=shard_id,
)
def add_image_input(model):
AddImageInput(
model,
reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=False,
mean_per_channel=args.image_mean_per_channel,
std_per_channel=args.image_std_per_channel,
)
def add_post_sync_ops(model):
"""Add ops applied after initial parameter sync."""
for param_info in model.GetOptimizationParamInfo(model.GetParams()):
if param_info.blob_copy is not None:
model.param_init_net.HalfToFloat(
param_info.blob,
param_info.blob_copy[core.DataType.FLOAT]
)
data_parallel_model.Parallelize(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnext_model_ops
if args.model == "resnext" else create_shufflenet_model_ops,
optimizer_builder_fun=add_optimizer,
post_sync_builder_fun=add_post_sync_ops,
devices=gpus,
rendezvous=rendezvous,
optimize_gradient_memory=False,
use_nccl=args.use_nccl,
cpu_device=args.use_cpu,
ideep=args.use_ideep,
shared_model=args.use_cpu,
combine_spatial_bn=args.use_cpu,
)
data_parallel_model.OptimizeGradientMemory(train_model, {}, set(), False)
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net)
# Add test model, if specified
test_model = None
if (args.test_data is not None):
log.info("----- Create test net ----")
if args.use_ideep:
test_arg_scope = {
'use_cudnn': False,
'cudnn_exhaustive_search': False,
}
else:
test_arg_scope = {
'order': "NCHW",
'use_cudnn': True,
'cudnn_exhaustive_search': True,
}
test_model = model_helper.ModelHelper(
name=model_name + "_test",
arg_scope=test_arg_scope,
init_params=False,
)
test_reader = test_model.CreateDB(
"test_reader",
db=args.test_data,
db_type=args.db_type,
)
def test_input_fn(model):
AddImageInput(
model,
test_reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=True,
mean_per_channel=args.image_mean_per_channel,
std_per_channel=args.image_std_per_channel,
)
data_parallel_model.Parallelize(
test_model,
input_builder_fun=test_input_fn,
forward_pass_builder_fun=create_resnext_model_ops
if args.model == "resnext" else create_shufflenet_model_ops,
post_sync_builder_fun=add_post_sync_ops,
param_update_builder_fun=None,
devices=gpus,
use_nccl=args.use_nccl,
cpu_device=args.use_cpu,
)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net)
epoch = 0
# load the pre-trained model and reset epoch
if args.load_model_path is not None:
LoadModel(args.load_model_path, train_model, args.use_ideep)
# Sync the model params
data_parallel_model.FinalizeAfterCheckpoint(train_model)
# reset epoch. load_model_path should end with *_X.mdl,
# where X is the epoch number
last_str = args.load_model_path.split('_')[-1]
if last_str.endswith('.mdl'):
epoch = int(last_str[:-4])
log.info("Reset epoch to {}".format(epoch))
else:
log.warning("The format of load_model_path doesn't match!")
expname = "%s_gpu%d_b%d_L%d_lr%.2f_v2" % (
model_name,
args.num_gpus,
total_batch_size,
args.num_labels,
args.base_learning_rate,
)
explog = experiment_util.ModelTrainerLog(expname, args)
# Run the training one epoch at a time
while epoch < args.num_epochs:
epoch = RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog
)
# Save the model for each epoch
SaveModel(args, train_model, epoch, args.use_ideep)
model_path = "%s/%s_" % (
args.file_store_path,
args.save_model_name
)
# remove the saved model from the previous epoch if it exists
if os.path.isfile(model_path + str(epoch - 1) + ".mdl"):
os.remove(model_path + str(epoch - 1) + ".mdl")
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: ImageNet Trainer"
)
parser.add_argument("--train_data", type=str, default=None, required=True,
help="Path to training data (or 'null' to simulate)")
parser.add_argument("--num_layers", type=int, default=50,
help="The number of layers in ResNe(X)t model")
parser.add_argument("--resnext_num_groups", type=int, default=1,
help="The cardinality of resnext")
parser.add_argument("--resnext_width_per_group", type=int, default=64,
help="The cardinality of resnext")
parser.add_argument("--test_data", type=str, default=None,
help="Path to test data")
parser.add_argument("--image_mean_per_channel", type=float, nargs='+',
help="The per channel mean for the images")
parser.add_argument("--image_std_per_channel", type=float, nargs='+',
help="The per channel standard deviation for the images")
parser.add_argument("--test_epoch_size", type=int, default=50000,
help="Number of test images")
parser.add_argument("--db_type", type=str, default="lmdb",
help="Database type (such as lmdb or leveldb)")
parser.add_argument("--gpus", type=str,
help="Comma separated list of GPU devices to use")
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPU devices (instead of --gpus)")
parser.add_argument("--num_channels", type=int, default=3,
help="Number of color channels")
parser.add_argument("--image_size", type=int, default=224,
help="Input image size (to crop to)")
parser.add_argument("--num_labels", type=int, default=1000,
help="Number of labels")
parser.add_argument("--batch_size", type=int, default=32,
help="Batch size, total over all GPUs")
parser.add_argument("--epoch_size", type=int, default=1500000,
help="Number of images/epoch, total over all machines")
parser.add_argument("--num_epochs", type=int, default=1000,
help="Num epochs.")
parser.add_argument("--base_learning_rate", type=float, default=0.1,
help="Initial learning rate.")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help="Weight decay (L2 regularization)")
parser.add_argument("--cudnn_workspace_limit_mb", type=int, default=64,
help="CuDNN workspace limit in MBs")
parser.add_argument("--num_shards", type=int, default=1,
help="Number of machines in distributed run")
parser.add_argument("--shard_id", type=int, default=0,
help="Shard id.")
parser.add_argument("--run_id", type=str,
help="Unique run identifier (e.g. uuid)")
parser.add_argument("--redis_host", type=str,
help="Host of Redis server (for rendezvous)")
parser.add_argument("--redis_port", type=int, default=6379,
help="Port of Redis server (for rendezvous)")
parser.add_argument("--file_store_path", type=str, default="/tmp",
help="Path to directory to use for rendezvous")
parser.add_argument("--save_model_name", type=str, default="resnext_model",
help="Save the trained model to a given name")
parser.add_argument("--load_model_path", type=str, default=None,
help="Load previously saved model to continue training")
parser.add_argument("--use_cpu", action="store_true",
help="Use CPU instead of GPU")
parser.add_argument("--use_nccl", action="store_true",
help="Use nccl for inter-GPU collectives")
parser.add_argument("--use_ideep", type=bool, default=False,
help="Use ideep")
parser.add_argument('--dtype', default='float',
choices=['float', 'float16'],
help='Data type used for training')
parser.add_argument('--float16_compute', action='store_true',
help="Use float 16 compute, if available")
parser.add_argument('--enable_tensor_core', action='store_true',
help='Enable Tensor Core math for Conv and FC ops')
parser.add_argument("--distributed_transport", type=str, default="tcp",
help="Transport to use for distributed run [tcp|ibverbs]")
parser.add_argument("--distributed_interfaces", type=str, default="",
help="Network interfaces to use for distributed run")
parser.add_argument("--first_iter_timeout", type=int, default=1200,
help="Timeout (secs) of the first iteration "
"(default: %(default)s)")
parser.add_argument("--timeout", type=int, default=60,
help="Timeout (secs) of each (except the first) iteration "
"(default: %(default)s)")
parser.add_argument("--model",
default="resnext", const="resnext", nargs="?",
choices=["shufflenet", "resnext"],
help="List of models which can be run")
args = parser.parse_args()
Train(args)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
# pytorch-master | caffe2/python/examples/imagenet_trainer.py
## @package char_rnn
# Module caffe2.python.examples.char_rnn
from caffe2.python import core, workspace, model_helper, utils, brew
from caffe2.python.rnn_cell import LSTM
from caffe2.proto import caffe2_pb2
from caffe2.python.optimizer import build_sgd
import argparse
import logging
import numpy as np
from datetime import datetime
'''
This script takes a text file as input and uses a recurrent neural network
to learn to predict next character in a sequence.
'''
logging.basicConfig()
log = logging.getLogger("char_rnn")
log.setLevel(logging.DEBUG)
# Default set() here is intentional as it would accumulate values like a global
# variable
def CreateNetOnce(net, created_names=set()): # noqa
name = net.Name()
if name not in created_names:
created_names.add(name)
workspace.CreateNet(net)
class CharRNN(object):
def __init__(self, args):
self.seq_length = args.seq_length
self.batch_size = args.batch_size
self.iters_to_report = args.iters_to_report
self.hidden_size = args.hidden_size
with open(args.train_data) as f:
self.text = f.read()
self.vocab = list(set(self.text))
self.char_to_idx = {ch: idx for idx, ch in enumerate(self.vocab)}
self.idx_to_char = {idx: ch for idx, ch in enumerate(self.vocab)}
self.D = len(self.char_to_idx)
print("Input has {} characters. Total input size: {}".format(
len(self.vocab), len(self.text)))
def CreateModel(self):
log.debug("Start training")
model = model_helper.ModelHelper(name="char_rnn")
input_blob, seq_lengths, hidden_init, cell_init, target = \
model.net.AddExternalInputs(
'input_blob',
'seq_lengths',
'hidden_init',
'cell_init',
'target',
)
hidden_output_all, self.hidden_output, _, self.cell_state = LSTM(
model, input_blob, seq_lengths, (hidden_init, cell_init),
self.D, self.hidden_size, scope="LSTM")
output = brew.fc(
model,
hidden_output_all,
None,
dim_in=self.hidden_size,
dim_out=self.D,
axis=2
)
# axis is 2 as first two are T (time) and N (batch size).
# We treat them as one big batch of size T * N
softmax = model.net.Softmax(output, 'softmax', axis=2)
softmax_reshaped, _ = model.net.Reshape(
softmax, ['softmax_reshaped', '_'], shape=[-1, self.D])
# Create a copy of the current net. We will use it on the forward
# pass where we don't need loss and backward operators
self.forward_net = core.Net(model.net.Proto())
xent = model.net.LabelCrossEntropy([softmax_reshaped, target], 'xent')
# Loss is averaged both across the batch and through time.
# That's why the learning rate below is multiplied by self.seq_length.
loss = model.net.AveragedLoss(xent, 'loss')
model.AddGradientOperators([loss])
# use the build_sgd function to build an optimizer
build_sgd(
model,
base_learning_rate=0.1 * self.seq_length,
policy="step",
stepsize=1,
gamma=0.9999
)
self.model = model
self.predictions = softmax
self.loss = loss
self.prepare_state = core.Net("prepare_state")
self.prepare_state.Copy(self.hidden_output, hidden_init)
self.prepare_state.Copy(self.cell_state, cell_init)
def _idx_at_pos(self, pos):
return self.char_to_idx[self.text[pos]]
def TrainModel(self):
log.debug("Training model")
workspace.RunNetOnce(self.model.param_init_net)
# As if we predicted the same probability for each character
smooth_loss = -np.log(1.0 / self.D) * self.seq_length
last_n_iter = 0
last_n_loss = 0.0
num_iter = 0
N = len(self.text)
# We split text into batch_size pieces. Each piece will be used only
# by a corresponding batch during the training process
text_block_positions = np.zeros(self.batch_size, dtype=np.int32)
text_block_size = N // self.batch_size
text_block_starts = list(range(0, N, text_block_size))
text_block_sizes = [text_block_size] * self.batch_size
text_block_sizes[self.batch_size - 1] += N % self.batch_size
assert sum(text_block_sizes) == N
# Writing to output states which will be copied to input
# states within the loop below
workspace.FeedBlob(self.hidden_output, np.zeros(
[1, self.batch_size, self.hidden_size], dtype=np.float32
))
workspace.FeedBlob(self.cell_state, np.zeros(
[1, self.batch_size, self.hidden_size], dtype=np.float32
))
workspace.CreateNet(self.prepare_state)
# We iterate over the text in a loop many times. Each time we pick a
# seq_length segment and feed it to the LSTM as a sequence
last_time = datetime.now()
progress = 0
while True:
workspace.FeedBlob(
"seq_lengths",
np.array([self.seq_length] * self.batch_size,
dtype=np.int32)
)
workspace.RunNet(self.prepare_state.Name())
input = np.zeros(
[self.seq_length, self.batch_size, self.D]
).astype(np.float32)
target = np.zeros(
[self.seq_length * self.batch_size]
).astype(np.int32)
for e in range(self.batch_size):
for i in range(self.seq_length):
pos = text_block_starts[e] + text_block_positions[e]
input[i][e][self._idx_at_pos(pos)] = 1
target[i * self.batch_size + e] =\
self._idx_at_pos((pos + 1) % N)
text_block_positions[e] = (
text_block_positions[e] + 1) % text_block_sizes[e]
progress += 1
workspace.FeedBlob('input_blob', input)
workspace.FeedBlob('target', target)
CreateNetOnce(self.model.net)
workspace.RunNet(self.model.net.Name())
num_iter += 1
last_n_iter += 1
if num_iter % self.iters_to_report == 0:
new_time = datetime.now()
print("Characters Per Second: {}". format(
int(progress / (new_time - last_time).total_seconds())
))
print("Iterations Per Second: {}". format(
int(self.iters_to_report /
(new_time - last_time).total_seconds())
))
last_time = new_time
progress = 0
print("{} Iteration {} {}".
format('-' * 10, num_iter, '-' * 10))
loss = workspace.FetchBlob(self.loss) * self.seq_length
smooth_loss = 0.999 * smooth_loss + 0.001 * loss
last_n_loss += loss
if num_iter % self.iters_to_report == 0:
self.GenerateText(500, np.random.choice(self.vocab))
log.debug("Loss since last report: {}"
.format(last_n_loss / last_n_iter))
log.debug("Smooth loss: {}".format(smooth_loss))
last_n_loss = 0.0
last_n_iter = 0
def GenerateText(self, num_characters, ch):
# Given a starting symbol we feed a fake sequence of size 1 to
# our RNN num_characters times. After each step we use the output
# probabilities to pick the next character to feed to the network.
# That character also becomes part of the generated output.
CreateNetOnce(self.forward_net)
text = '' + ch
for _i in range(num_characters):
workspace.FeedBlob(
"seq_lengths", np.array([1] * self.batch_size, dtype=np.int32))
workspace.RunNet(self.prepare_state.Name())
input = np.zeros([1, self.batch_size, self.D]).astype(np.float32)
input[0][0][self.char_to_idx[ch]] = 1
workspace.FeedBlob("input_blob", input)
workspace.RunNet(self.forward_net.Name())
p = workspace.FetchBlob(self.predictions)
            next_idx = np.random.choice(self.D, p=p[0][0])
            ch = self.idx_to_char[next_idx]
text += ch
print(text)
@utils.debug
def main():
parser = argparse.ArgumentParser(
description="Caffe2: Char RNN Training"
)
parser.add_argument("--train_data", type=str, default=None,
help="Path to training data in a text file format",
required=True)
parser.add_argument("--seq_length", type=int, default=25,
help="One training example sequence length")
parser.add_argument("--batch_size", type=int, default=1,
help="Training batch size")
parser.add_argument("--iters_to_report", type=int, default=500,
help="How often to report loss and generate text")
parser.add_argument("--hidden_size", type=int, default=100,
help="Dimension of the hidden representation")
parser.add_argument("--gpu", action="store_true",
help="If set, training is going to use GPU 0")
args = parser.parse_args()
device = core.DeviceOption(
workspace.GpuDeviceType if args.gpu else caffe2_pb2.CPU, 0)
with core.DeviceScope(device):
model = CharRNN(args)
model.CreateModel()
model.TrainModel()
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
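# Example invocation (a sketch; the corpus path below is hypothetical):
#   python char_rnn.py --train_data data/input.txt --batch_size 32 \
#       --seq_length 25 --hidden_size 100 --iters_to_report 500
# Add --gpu to place the model on GPU 0 instead of the CPU.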
|
pytorch-master
|
caffe2/python/examples/char_rnn.py
|
import argparse
import numpy as np
from caffe2.python import core, workspace
def main(bit_rate):
# uncomment for debugging
# np.random.seed(0)
batchsize = 10 * 1000
blocksize = 64
print(batchsize, blocksize)
input_data = np.random.rand(batchsize, blocksize).astype(np.float32)
workspace.FeedBlob("input_data", input_data)
net = core.Net("bench")
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
"input_data",
"quantized_data",
engine="GREEDY",
)
net.Proto().op.extend([op])
workspace.GlobalInit(["caffe2", "--caffe2_log_level=0"])
workspace.CreateNet(net)
iterations = 10
workspace.BenchmarkNet(net.Proto().name, 1, iterations, True)
net2 = core.Net("bench2")
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized",
"input_data",
"quantized_data",
)
net2.Proto().op.extend([op])
workspace.CreateNet(net2)
workspace.BenchmarkNet(net2.Proto().name, 1, iterations, True)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="benchmark for row-wise 2/4-bit quantization."
)
parser.add_argument("--bit-rate", type=int, default=4)
args = parser.parse_args()
main(args.bit_rate)
|
pytorch-master
|
caffe2/python/benchmarks/fused_rowwise_nbit_conversion_bench.py
|
import argparse
import datetime
# import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
def benchmark_sparse_lengths_sum(
categorical_limit,
embedding_size,
average_len,
batch_size,
iterations,
flush_cache,
    bit_rate=4,
):
print("Preparing lookup table. " + str(datetime.datetime.now()))
# We will use a constant, but non-trivial value so we save initialization
# time.
data = np.ones([categorical_limit, embedding_size], dtype=np.float32)
data *= 17.01
init_net = core.Net("init_net")
op = core.CreateOperator(
"FloatToFused" + str(bit_rate) + "BitRowwiseQuantized", "X", "X_q"
)
init_net.Proto().op.extend([op])
workspace.FeedBlob("X", data)
print("Data has shape {} {}".format(data.shape, datetime.datetime.now()))
# In order to produce truly random lengths and indices, we will embed a
# Python operator in the net to generate them.
def f(_, outputs):
lengths = np.random.randint(
int(average_len * 0.75), int(average_len * 1.25), batch_size
).astype(np.int32)
indices = np.random.randint(0, categorical_limit, np.sum(lengths)).astype(
np.int64
)
outputs[0].feed(indices)
outputs[1].feed(lengths)
init_net.Python(f)([], ["indices", "lengths"])
workspace.RunNetOnce(init_net)
net = core.Net("mynet")
if flush_cache:
l3_cache_size = 30 * 2 ** 20 // 4
workspace.FeedBlob(
"huge_blob", np.random.randn(l3_cache_size).astype(np.float32)
)
net.Scale("huge_blob", "huge_blob_2x", value=2.0)
op = core.CreateOperator(
"SparseLengthsSumFused" + str(bit_rate) + "BitRowwise",
["X_q", "indices", "lengths"],
"Y",
)
net.Proto().op.extend([op])
workspace.CreateNet(net)
# Set random seed, so that repeated runs will keep the same sequence of
# random indices.
np.random.seed(1701)
print("Preparation finished. " + str(datetime.datetime.now()))
runtimes = workspace.BenchmarkNet(net.Name(), 1, iterations, True)
print(
"{} billion sums per sec".format(
embedding_size
* workspace.FetchBlob("indices").size
/ runtimes[2 if flush_cache else 1]
/ 1e6
)
)
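    # Note on units: BenchmarkNet returns per-iteration runtimes in
    # milliseconds, so (embedding_size * number of indices) divided by the
    # millisecond figure and by 1e6 is billions of element sums per second.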
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="minimal benchmark for sparse lengths sum."
)
parser.add_argument(
"-e", "--embedding-size", type=int, default=6000000, help="Lookup table size."
)
parser.add_argument(
"--embedding-dim", type=int, default=128, help="Embedding dimension."
)
parser.add_argument(
"--average_len",
type=int,
default=27,
help="Sparse feature average lengths, default is 27",
)
parser.add_argument("--batch_size", type=int, default=100, help="The batch size.")
parser.add_argument(
"-i", "--iteration", type=int, default=100000, help="The number of iterations."
)
parser.add_argument(
"--flush-cache", action="store_true", help="If true, flush cache"
)
parser.add_argument("--bit-rate", type=int, default=4)
args, extra_args = parser.parse_known_args()
core.GlobalInit(["python"] + extra_args)
benchmark_sparse_lengths_sum(
args.embedding_size,
args.embedding_dim,
args.average_len,
args.batch_size,
args.iteration,
args.flush_cache,
args.bit_rate,
)
|
pytorch-master
|
caffe2/python/benchmarks/sparse_lengths_sum_nbit_benchmark.py
|
import argparse
import numpy as np
from caffe2.python import core, workspace
def benchmark_concat(num_inputs, input_dim, axis, add_axis, iterations):
input_names = [f"input{i}" for i in range(num_inputs)]
for n in input_names:
workspace.FeedBlob(n, np.random.randn(*input_dim).astype(np.float32))
net = core.Net("benchmark_net")
net.Concat(input_names, ["output", "split_info"], axis=axis, add_axis=add_axis)
workspace.CreateNet(net)
runtimes = workspace.BenchmarkNet(net.Name(), 1, iterations, True)
print(f"{num_inputs * np.prod(input_dim) * 4 / runtimes[1] / 1e6} GB/s")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="minimal benchmark for concat.")
parser.add_argument("--num_inputs", type=int, default=2)
parser.add_argument("--input_dim", nargs="+", type=int, required=True)
parser.add_argument("--axis", type=int, default=-1)
parser.add_argument("--add_axis", type=int, default=0)
parser.add_argument("--iterations", type=int, default=64)
args, extra_args = parser.parse_known_args()
core.GlobalInit(["python"] + extra_args)
benchmark_concat(
args.num_inputs, args.input_dim, args.axis, args.add_axis, args.iterations
)
|
pytorch-master
|
caffe2/python/benchmarks/concat_benchmark.py
|
import argparse
import datetime
# import hypothesis.strategies as st
import numpy as np
from caffe2.python import core, workspace
def benchmark_sparse_normalize(
categorical_limit,
embedding_size,
average_len,
batch_size,
iterations,
flush_cache,
fp16,
):
print("Preparing lookup table. " + str(datetime.datetime.now()))
# We will use a constant, but non-trivial value so we save initialization
# time.
data = np.ones([categorical_limit, embedding_size], dtype=np.float32)
data *= 17.01
init_net = core.Net("init_net")
if fp16:
op = core.CreateOperator("FloatToHalf", "X", "X_fp16")
init_net.Proto().op.extend([op])
l3_cache_size = 30 * 2 ** 20 // 4
# In order to produce truly random lengths and indices, we will embed a
# Python operator in the net to generate them.
def f(_, outputs):
lengths = np.random.randint(
int(average_len * 0.75), int(average_len * 1.25), batch_size
).astype(np.int32)
indices = np.random.randint(0, categorical_limit, np.sum(lengths)).astype(
np.int64
)
outputs[0].feed(indices)
workspace.FeedBlob("X", data)
workspace.FeedBlob("huge_blob", np.random.randn(l3_cache_size).astype(np.float32))
print("Data has shape {} {}".format(data.shape, datetime.datetime.now()))
init_net.Python(f)([], ["indices"])
workspace.RunNetOnce(init_net)
net = core.Net("mynet")
op = core.CreateOperator(
"Float16SparseNormalize" if fp16 else "SparseNormalize",
["X_fp16", "indices"] if fp16 else ["X", "indices"],
"X_fp16" if fp16 else "X",
)
net.Proto().external_input.append("X")
net.Proto().external_input.append("X_fp16")
net.Proto().external_input.append("indices")
net.Proto().op.extend([op])
if flush_cache:
net.Scale("huge_blob", "huge_blob_2x", value=2.0)
workspace.CreateNet(net)
# Set random seed, so that repeated runs will keep the same sequence of
# random indices.
np.random.seed(1701)
print("Preparation finished. " + str(datetime.datetime.now()))
runtimes = workspace.BenchmarkNet(net.Name(), 1, iterations, True)
print("{} ms".format(runtimes[2 if flush_cache else 1]))
print("indice_size: " + str(workspace.FetchBlob("indices").size))
print(
"{} GB/sec".format(
(2 if fp16 else 4)
* embedding_size
* workspace.FetchBlob("indices").size
/ runtimes[2 if flush_cache else 1]
/ 1e6
)
)
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="minimal benchmark for sparse lengths sum."
)
parser.add_argument(
"-e", "--embedding-size", type=int, default=600000, help="Lookup table size."
)
parser.add_argument(
"--embedding-dim", type=int, default=128, help="Embedding dimension."
)
parser.add_argument(
"--average-len",
type=int,
default=27,
help="Sparse feature average lengths, default is 27",
)
parser.add_argument("--batch_size", type=int, default=100, help="The batch size.")
parser.add_argument(
"-i", "--iteration", type=int, default=100, help="The number of iterations."
)
parser.add_argument(
"--flush-cache", action="store_true", help="If true, flush cache"
)
parser.add_argument("--fp16", action="store_true", help="If true, use fp16")
args, extra_args = parser.parse_known_args()
core.GlobalInit(["python"] + extra_args)
benchmark_sparse_normalize(
args.embedding_size,
args.embedding_dim,
args.average_len,
args.batch_size,
args.iteration,
args.flush_cache,
args.fp16,
)
|
pytorch-master
|
caffe2/python/benchmarks/sparse_normalize_benchmark.py
|
## @package predictor_py_utils
# Module caffe2.python.predictor.predictor_py_utils
from caffe2.python import core, scope
def create_predict_net(predictor_export_meta):
"""
Return the input prediction net.
"""
# Construct a new net to clear the existing settings.
net = core.Net(predictor_export_meta.predict_net.name or "predict")
net.Proto().op.extend(predictor_export_meta.predict_net.op)
net.Proto().partition_info.extend(predictor_export_meta.predict_net.partition_info)
net.Proto().external_input.extend(
predictor_export_meta.inputs + predictor_export_meta.parameters
)
net.Proto().external_output.extend(predictor_export_meta.outputs)
net.Proto().arg.extend(predictor_export_meta.predict_net.arg)
if predictor_export_meta.net_type is not None:
net.Proto().type = predictor_export_meta.net_type
if predictor_export_meta.num_workers is not None:
net.Proto().num_workers = predictor_export_meta.num_workers
return net.Proto()
def create_predict_init_net(ws, predictor_export_meta):
"""
    Return an initialization net that zero-fills all the input and
output blobs, using the shapes from the provided workspace. This is
necessary as there is no shape inference functionality in Caffe2.
"""
net = core.Net("predict-init")
def zero_fill(blob):
shape = predictor_export_meta.shapes.get(blob)
if shape is None:
if blob not in ws.blobs:
raise Exception(
"{} not in workspace but needed for shape: {}".format(
blob, ws.blobs
)
)
shape = ws.blobs[blob].fetch().shape
# Explicitly null-out the scope so users (e.g. PredictorGPU)
# can control (at a Net-global level) the DeviceOption of
# these filling operators.
with scope.EmptyDeviceScope():
net.ConstantFill([], blob, shape=shape, value=0.0)
external_blobs = predictor_export_meta.inputs + predictor_export_meta.outputs
for blob in external_blobs:
zero_fill(blob)
net.Proto().external_input.extend(external_blobs)
if predictor_export_meta.extra_init_net:
net.AppendNet(predictor_export_meta.extra_init_net)
# Add the model_id in the predict_net to the init_net
AddModelIdArg(predictor_export_meta, net.Proto())
return net.Proto()
def get_comp_name(string, name):
if name:
return string + "_" + name
return string
def to_first_match_dict(kv_list):
"""
Construct dict from kv_list
"""
d = {}
for item in kv_list:
if item.key not in d:
d[item.key] = item.value
return d
def _ProtoMapGet(field, key):
"""
Given the key, get the value of the repeated field.
    Helper for protobuf messages, which lack a native map construct.
"""
for v in field:
if v.key == key:
return v.value
return None
def GetPlan(meta_net_def, key):
return _ProtoMapGet(meta_net_def.plans, key)
def GetPlanOriginal(meta_net_def, key):
return _ProtoMapGet(meta_net_def.plans, key)
def GetBlobs(meta_net_def, key):
blobs = _ProtoMapGet(meta_net_def.blobs, key)
if blobs is None:
return []
return blobs
def GetBlobsByTypePrefix(meta_net_def, blob_type_prefix):
blob_map = {}
for b in meta_net_def.blobs:
if b.key.startswith(blob_type_prefix):
for blob in b.value:
if blob not in blob_map:
blob_map[blob] = len(blob_map)
return sorted(blob_map, key=lambda blob: blob_map[blob])
def GetNet(meta_net_def, key):
return _ProtoMapGet(meta_net_def.nets, key)
def GetNetOriginal(meta_net_def, key):
return _ProtoMapGet(meta_net_def.nets, key)
def GetApplicationSpecificInfo(meta_net_def, key):
return _ProtoMapGet(meta_net_def.applicationSpecificInfo, key)
def GetApplicationSpecificInfoDict(meta_net_def):
return to_first_match_dict(meta_net_def.applicationSpecificInfo)
def AddBlobs(meta_net_def, blob_name, blob_def):
blobs = _ProtoMapGet(meta_net_def.blobs, blob_name)
if blobs is None:
blobs = meta_net_def.blobs.add()
blobs.key = blob_name
blobs = blobs.value
for blob in blob_def:
blobs.append(blob)
def ReplaceBlobs(meta_net_def, blob_name, blob_def):
blobs = _ProtoMapGet(meta_net_def.blobs, blob_name)
assert blobs is not None, "The blob_name:{} does not exist".format(blob_name)
del blobs[:]
for blob in blob_def:
blobs.append(blob)
def AddPlan(meta_net_def, plan_name, plan_def):
meta_net_def.plans.add(key=plan_name, value=plan_def)
def AddNet(meta_net_def, net_name, net_def):
meta_net_def.nets.add(key=net_name, value=net_def)
def SetBlobsOrder(meta_net_def, blobs_order):
for blob in blobs_order:
meta_net_def.blobsOrder.append(blob)
def SetPreLoadBlobs(meta_net_def, pre_load_blobs):
for blob in pre_load_blobs:
meta_net_def.preLoadBlobs.append(blob)
def SetRequestOnlyEmbeddings(meta_net_def, request_only_embeddings):
for blob in request_only_embeddings:
meta_net_def.requestOnlyEmbeddings.append(blob)
def GetBlobsOrder(meta_net_def):
return meta_net_def.blobsOrder
def SetTensorBoundShapes(meta_net_def, tensor_bound_shapes):
meta_net_def.tensorBoundShapes.CopyFrom(tensor_bound_shapes)
def SetAOTConfig(meta_net_def, aot_config):
meta_net_def.aotConfig.CopyFrom(aot_config)
def GetArgumentByName(net_def, arg_name):
for arg in net_def.arg:
if arg.name == arg_name:
return arg
return None
def AddModelIdArg(meta_net_def, net_def):
"""Takes the model_id from the predict_net of meta_net_def (if it is
populated) and adds it to the net_def passed in. This is intended to be
called on init_nets, as their model_id is not populated by default, but
should be the same as that of the predict_net
"""
# Get model_id from the predict_net, assuming it's an integer
model_id = GetArgumentByName(meta_net_def.predict_net, "model_id")
if model_id is None:
return
model_id = model_id.i
# If there's another model_id on the net, replace it with the new one
old_id = GetArgumentByName(net_def, "model_id")
if old_id is not None:
old_id.i = model_id
return
# Add as an integer argument, this is also assumed above
arg = net_def.arg.add()
arg.name = "model_id"
arg.i = model_id
|
pytorch-master
|
caffe2/python/predictor/predictor_py_utils.py
|
## @package predictor_exporter
# Module caffe2.python.predictor.predictor_exporter
from caffe2.proto import caffe2_pb2
from caffe2.proto import metanet_pb2
from caffe2.python import workspace, core, scope
from caffe2.python.predictor_constants import predictor_constants
import caffe2.python.predictor.serde as serde
import caffe2.python.predictor.predictor_py_utils as utils
from builtins import bytes
import collections
def get_predictor_exporter_helper(submodelNetName):
""" constracting stub for the PredictorExportMeta
Only used to construct names to subfields,
such as calling to predict_net_name
Args:
submodelNetName - name of the model
"""
stub_net = core.Net(submodelNetName)
pred_meta = PredictorExportMeta(predict_net=stub_net,
parameters=[],
inputs=[],
outputs=[],
shapes=None,
name=submodelNetName,
extra_init_net=None)
return pred_meta
# pyre-fixme[13]: Pyre can't detect the attribute initialization via cls.super() here
class PredictorExportMeta(collections.namedtuple(
'PredictorExportMeta',
'predict_net, parameters, inputs, outputs, shapes, name, '
'extra_init_net, global_init_net, net_type, num_workers, trainer_prefix')):
"""
    Metadata to be used for serializing a net.
    parameters, inputs, outputs can be either BlobReferences or blob names.
    predict_net can be a core.Net, core.Plan, NetDef or PlanDef.
    We override the named tuple to provide an optional name parameter;
name will be used to identify multiple prediction nets.
net_type is the type field in caffe2 NetDef - can be 'simple', 'dag', etc.
num_workers specifies for net type 'dag' how many threads should run ops
trainer_prefix specifies the type of trainer.
extra_init_net gets appended to pred_init_net, useful for thread local init
    global_init_net gets appended to the exported global init net, useful for
    global initialization on a parameter workspace shared across threads
    (in the case of multi-threaded inference)
"""
def __new__(
cls,
predict_net,
parameters,
inputs,
outputs,
shapes=None,
name="",
extra_init_net=None,
global_init_net=None,
net_type=None,
num_workers=None,
trainer_prefix=None,
):
inputs = [str(i) for i in inputs]
outputs = [str(o) for o in outputs]
assert len(set(inputs)) == len(inputs), (
"All inputs to the predictor should be unique")
parameters = [str(p) for p in parameters]
assert set(parameters).isdisjoint(inputs), (
"Parameters and inputs are required to be disjoint. "
"Intersection: {}".format(set(parameters).intersection(inputs)))
assert set(parameters).isdisjoint(outputs), (
"Parameters and outputs are required to be disjoint. "
"Intersection: {}".format(set(parameters).intersection(outputs)))
shapes = shapes or {}
if predict_net is not None:
if isinstance(predict_net, (core.Net, core.Plan)):
predict_net = predict_net.Proto()
assert isinstance(predict_net, (caffe2_pb2.NetDef, caffe2_pb2.PlanDef))
return super(PredictorExportMeta, cls).__new__(
cls, predict_net, parameters, inputs, outputs, shapes, name,
extra_init_net, global_init_net, net_type, num_workers, trainer_prefix)
def inputs_name(self):
return utils.get_comp_name(predictor_constants.INPUTS_BLOB_TYPE,
self.name)
def outputs_name(self):
return utils.get_comp_name(predictor_constants.OUTPUTS_BLOB_TYPE,
self.name)
def parameters_name(self):
return utils.get_comp_name(predictor_constants.PARAMETERS_BLOB_TYPE,
self.name)
def global_init_name(self):
return utils.get_comp_name(predictor_constants.GLOBAL_INIT_NET_TYPE,
self.name)
def predict_init_name(self):
return utils.get_comp_name(predictor_constants.PREDICT_INIT_NET_TYPE,
self.name)
def predict_net_name(self):
return utils.get_comp_name(predictor_constants.PREDICT_NET_TYPE,
self.name)
def train_init_plan_name(self):
plan_name = utils.get_comp_name(predictor_constants.TRAIN_INIT_PLAN_TYPE,
self.name)
return self.trainer_prefix + '_' + plan_name \
if self.trainer_prefix else plan_name
def train_plan_name(self):
plan_name = utils.get_comp_name(predictor_constants.TRAIN_PLAN_TYPE,
self.name)
return self.trainer_prefix + '_' + plan_name \
if self.trainer_prefix else plan_name
def prepare_prediction_net(filename, db_type, device_option=None):
'''
Helper function which loads all required blobs from the db
and returns prediction net ready to be used
'''
metanet_def = load_from_db(filename, db_type, device_option)
global_init_net = utils.GetNet(
metanet_def, predictor_constants.GLOBAL_INIT_NET_TYPE)
workspace.RunNetOnce(global_init_net)
predict_init_net = utils.GetNet(
metanet_def, predictor_constants.PREDICT_INIT_NET_TYPE)
workspace.RunNetOnce(predict_init_net)
predict_net = core.Net(
utils.GetNet(metanet_def, predictor_constants.PREDICT_NET_TYPE))
workspace.CreateNet(predict_net)
return predict_net
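# A minimal usage sketch (the db path and blob names are hypothetical and
# depend on how the model was exported; numpy is assumed imported as np):
#
#   predict_net = prepare_prediction_net("model.minidb", "minidb")
#   workspace.FeedBlob("data", np.zeros((1, 5), dtype=np.float32))
#   workspace.RunNet(predict_net)
#   y = workspace.FetchBlob("y")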
def _global_init_net(predictor_export_meta, db_type):
net = core.Net("global-init")
# manifold_db does not need DBReader
if db_type != "manifold_db":
net.Load(
[predictor_constants.PREDICTOR_DBREADER],
predictor_export_meta.parameters)
net.Proto().external_input.extend([predictor_constants.PREDICTOR_DBREADER])
net.Proto().external_output.extend(predictor_export_meta.parameters)
if predictor_export_meta.global_init_net:
net.AppendNet(predictor_export_meta.global_init_net)
# Add the model_id in the predict_net to the global_init_net
utils.AddModelIdArg(predictor_export_meta, net.Proto())
return net.Proto()
def get_meta_net_def(predictor_export_meta, ws=None, db_type=None):
"""
"""
ws = ws or workspace.C.Workspace.current
meta_net_def = metanet_pb2.MetaNetDef()
# Predict net is the core network that we use.
utils.AddNet(meta_net_def, predictor_export_meta.predict_init_name(),
utils.create_predict_init_net(ws, predictor_export_meta))
utils.AddNet(meta_net_def, predictor_export_meta.global_init_name(),
_global_init_net(predictor_export_meta, db_type))
utils.AddNet(meta_net_def, predictor_export_meta.predict_net_name(),
utils.create_predict_net(predictor_export_meta))
utils.AddBlobs(meta_net_def, predictor_export_meta.parameters_name(),
predictor_export_meta.parameters)
utils.AddBlobs(meta_net_def, predictor_export_meta.inputs_name(),
predictor_export_meta.inputs)
utils.AddBlobs(meta_net_def, predictor_export_meta.outputs_name(),
predictor_export_meta.outputs)
return meta_net_def
def set_model_info(meta_net_def, project_str, model_class_str, version):
assert isinstance(meta_net_def, metanet_pb2.MetaNetDef)
meta_net_def.modelInfo.project = project_str
meta_net_def.modelInfo.modelClass = model_class_str
meta_net_def.modelInfo.version = version
def save_to_db(db_type, db_destination, predictor_export_meta, use_ideep=False,
*args, **kwargs):
meta_net_def = get_meta_net_def(predictor_export_meta, db_type=db_type)
device_type = caffe2_pb2.IDEEP if use_ideep else caffe2_pb2.CPU
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
workspace.FeedBlob(
predictor_constants.META_NET_DEF,
serde.serialize_protobuf_struct(meta_net_def)
)
blobs_to_save = [predictor_constants.META_NET_DEF] + \
predictor_export_meta.parameters
op = core.CreateOperator(
"Save",
blobs_to_save, [],
device_option = core.DeviceOption(device_type),
absolute_path=True,
db=db_destination, db_type=db_type,
**kwargs
)
workspace.RunOperatorOnce(op)
def load_from_db(filename, db_type, device_option=None, *args, **kwargs):
# global_init_net in meta_net_def will load parameters from
# predictor_constants.PREDICTOR_DBREADER
create_db = core.CreateOperator(
'CreateDB', [],
[core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
db=filename, db_type=db_type)
assert workspace.RunOperatorOnce(create_db), (
'Failed to create db {}'.format(filename))
# predictor_constants.META_NET_DEF is always stored before the parameters
load_meta_net_def = core.CreateOperator(
'Load',
[core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
[core.BlobReference(predictor_constants.META_NET_DEF)])
assert workspace.RunOperatorOnce(load_meta_net_def)
blob = workspace.FetchBlob(predictor_constants.META_NET_DEF)
meta_net_def = serde.deserialize_protobuf_struct(
blob if isinstance(blob, bytes)
else str(blob).encode('utf-8'),
metanet_pb2.MetaNetDef)
if device_option is None:
device_option = scope.CurrentDeviceScope()
if device_option is not None:
# Set the device options of all loaded blobs
for kv in meta_net_def.nets:
net = kv.value
for op in net.op:
op.device_option.CopyFrom(device_option)
return meta_net_def
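# Export/load round trip, sketched from predictor_exporter_test.py in this
# repository (paths and blob names are illustrative only):
#
#   pem = PredictorExportMeta(predict_net=model.net, parameters=model.params,
#                             inputs=["data"], outputs=["y"])
#   save_to_db(db_type="minidb", db_destination="/tmp/model.minidb",
#              predictor_export_meta=pem)
#   meta_net_def = load_from_db("/tmp/model.minidb", "minidb")
#   workspace.RunNetOnce(utils.GetNet(
#       meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE))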
|
pytorch-master
|
caffe2/python/predictor/predictor_exporter.py
|
## @package serde
# Module caffe2.python.predictor.serde
def serialize_protobuf_struct(protobuf_struct):
return protobuf_struct.SerializeToString()
def deserialize_protobuf_struct(serialized_protobuf, struct_type):
deser = struct_type()
deser.ParseFromString(serialized_protobuf)
return deser
|
pytorch-master
|
caffe2/python/predictor/serde.py
|
from caffe2.python.test_util import TestCase
from caffe2.python import workspace, brew
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
import numpy as np
class TestMobileExporter(TestCase):
def test_mobile_exporter(self):
model = ModelHelper(name="mobile_exporter_test_model")
# Test LeNet
brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5)
brew.max_pool(model, 'conv2', 'pool2', kernel=2, stride=2)
brew.fc(model, 'pool2', 'fc3', dim_in=50 * 4 * 4, dim_out=500)
brew.relu(model, 'fc3', 'fc3')
brew.fc(model, 'fc3', 'pred', 500, 10)
brew.softmax(model, 'pred', 'out')
# Create our mobile exportable networks
workspace.RunNetOnce(model.param_init_net)
init_net, predict_net = mobile_exporter.Export(
workspace, model.net, model.params
)
# Populate the workspace with data
np_data = np.random.rand(1, 1, 28, 28).astype(np.float32)
workspace.FeedBlob("data", np_data)
workspace.CreateNet(model.net)
workspace.RunNet(model.net)
ref_out = workspace.FetchBlob("out")
# Clear the workspace
workspace.ResetWorkspace()
# Populate the workspace with data
workspace.RunNetOnce(init_net)
# Fake "data" is populated by init_net, we have to replace it
workspace.FeedBlob("data", np_data)
# Overwrite the old net
workspace.CreateNet(predict_net, True)
workspace.RunNet(predict_net.name)
manual_run_out = workspace.FetchBlob("out")
np.testing.assert_allclose(
ref_out, manual_run_out, atol=1e-10, rtol=1e-10
)
# Clear the workspace
workspace.ResetWorkspace()
# Predictor interface test (simulates writing to disk)
predictor = workspace.Predictor(
init_net.SerializeToString(), predict_net.SerializeToString()
)
# Output is a vector of outputs but we only care about the first and only result
predictor_out = predictor.run([np_data])
assert len(predictor_out) == 1
predictor_out = predictor_out[0]
np.testing.assert_allclose(
ref_out, predictor_out, atol=1e-10, rtol=1e-10
)
def test_mobile_exporter_datatypes(self):
model = ModelHelper(name="mobile_exporter_test_model")
model.Copy("data_int", "out")
model.params.append("data_int")
model.Copy("data_obj", "out_obj")
model.params.append("data_obj")
# Create our mobile exportable networks
workspace.RunNetOnce(model.param_init_net)
np_data_int = np.random.randint(100, size=(1, 1, 28, 28), dtype=np.int32)
workspace.FeedBlob("data_int", np_data_int)
np_data_obj = np.array(['aa', 'bb']).astype(np.dtype('O'))
workspace.FeedBlob("data_obj", np_data_obj)
init_net, predict_net = mobile_exporter.Export(
workspace, model.net, model.params
)
workspace.CreateNet(model.net)
workspace.RunNet(model.net)
ref_out = workspace.FetchBlob("out")
ref_out_obj = workspace.FetchBlob("out_obj")
# Clear the workspace
workspace.ResetWorkspace()
# Populate the workspace with data
workspace.RunNetOnce(init_net)
# Overwrite the old net
workspace.CreateNet(predict_net, True)
workspace.RunNet(predict_net.name)
manual_run_out = workspace.FetchBlob("out")
manual_run_out_obj = workspace.FetchBlob("out_obj")
np.testing.assert_allclose(
ref_out, manual_run_out, atol=1e-10, rtol=1e-10
)
np.testing.assert_equal(ref_out_obj, manual_run_out_obj)
# Clear the workspace
workspace.ResetWorkspace()
# Predictor interface test (simulates writing to disk)
predictor = workspace.Predictor(
init_net.SerializeToString(), predict_net.SerializeToString()
)
# Output is a vector of outputs.
predictor_out = predictor.run([])
assert len(predictor_out) == 2
predictor_out_int = predictor_out[1]
predictor_out_obj = predictor_out[0]
# The order in predictor_out is non-deterministic. Use type of the entry
# to figure out what to compare it to.
if isinstance(predictor_out[1][0], bytes):
predictor_out_int = predictor_out[0]
predictor_out_obj = predictor_out[1]
np.testing.assert_allclose(
ref_out, predictor_out_int, atol=1e-10, rtol=1e-10
)
np.testing.assert_equal(ref_out_obj, predictor_out_obj)
|
pytorch-master
|
caffe2/python/predictor/mobile_exporter_test.py
|
pytorch-master
|
caffe2/python/predictor/__init__.py
|
|
import tempfile
import unittest
import numpy as np
from caffe2.python import cnn, workspace, core
from future.utils import viewitems
from caffe2.python.predictor_constants import predictor_constants as pc
import caffe2.python.predictor.predictor_exporter as pe
import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.proto import caffe2_pb2, metanet_pb2
class MetaNetDefTest(unittest.TestCase):
def test_minimal(self):
'''
Tests that a NetsMap message can be created with a NetDef message
'''
# This calls the constructor for a metanet_pb2.NetsMap
metanet_pb2.NetsMap(key="test_key", value=caffe2_pb2.NetDef())
def test_adding_net(self):
'''
Tests that NetDefs can be added to MetaNetDefs
'''
meta_net_def = metanet_pb2.MetaNetDef()
net_def = caffe2_pb2.NetDef()
meta_net_def.nets.add(key="test_key", value=net_def)
def test_replace_blobs(self):
'''
        Tests that blobs can be added to and replaced in MetaNetDefs
'''
meta_net_def = metanet_pb2.MetaNetDef()
blob_name = "Test"
blob_def = ["AA"]
blob_def2 = ["BB"]
replaced_blob_def = ["CC"]
pred_utils.AddBlobs(meta_net_def, blob_name, blob_def)
self.assertEqual(blob_def, pred_utils.GetBlobs(meta_net_def, blob_name))
pred_utils.AddBlobs(meta_net_def, blob_name, blob_def2)
self.assertEqual(blob_def + blob_def2, pred_utils.GetBlobs(meta_net_def, blob_name))
pred_utils.ReplaceBlobs(meta_net_def, blob_name, replaced_blob_def)
self.assertEqual(replaced_blob_def, pred_utils.GetBlobs(meta_net_def, blob_name))
class PredictorExporterTest(unittest.TestCase):
def _create_model(self):
m = cnn.CNNModelHelper()
m.FC("data", "y",
dim_in=5, dim_out=10,
weight_init=m.XavierInit,
bias_init=m.XavierInit)
return m
def setUp(self):
np.random.seed(1)
m = self._create_model()
self.predictor_export_meta = pe.PredictorExportMeta(
predict_net=m.net.Proto(),
parameters=[str(b) for b in m.params],
inputs=["data"],
outputs=["y"],
shapes={"y": (1, 10), "data": (1, 5)},
)
workspace.RunNetOnce(m.param_init_net)
self.params = {
param: workspace.FetchBlob(param)
for param in self.predictor_export_meta.parameters}
# Reset the workspace, to ensure net creation proceeds as expected.
workspace.ResetWorkspace()
def test_meta_constructor(self):
'''
Test that passing net itself instead of proto works
'''
m = self._create_model()
pe.PredictorExportMeta(
predict_net=m.net,
parameters=m.params,
inputs=["data"],
outputs=["y"],
shapes={"y": (1, 10), "data": (1, 5)},
)
def test_param_intersection(self):
'''
        Test that intersecting parameters and input/output blobs are rejected
'''
m = self._create_model()
with self.assertRaises(Exception):
pe.PredictorExportMeta(
predict_net=m.net,
parameters=m.params,
inputs=["data"] + m.params,
outputs=["y"],
shapes={"y": (1, 10), "data": (1, 5)},
)
with self.assertRaises(Exception):
pe.PredictorExportMeta(
predict_net=m.net,
parameters=m.params,
inputs=["data"],
outputs=["y"] + m.params,
shapes={"y": (1, 10), "data": (1, 5)},
)
def test_meta_net_def_net_runs(self):
for param, value in viewitems(self.params):
workspace.FeedBlob(param, value)
extra_init_net = core.Net('extra_init')
extra_init_net.ConstantFill('data', 'data', value=1.0)
global_init_net = core.Net('global_init')
global_init_net.ConstantFill(
[],
'global_init_blob',
value=1.0,
shape=[1, 5],
dtype=core.DataType.FLOAT
)
pem = pe.PredictorExportMeta(
predict_net=self.predictor_export_meta.predict_net,
parameters=self.predictor_export_meta.parameters,
inputs=self.predictor_export_meta.inputs,
outputs=self.predictor_export_meta.outputs,
shapes=self.predictor_export_meta.shapes,
extra_init_net=extra_init_net,
global_init_net=global_init_net,
net_type='dag',
)
db_type = 'minidb'
db_file = tempfile.NamedTemporaryFile(
delete=False, suffix=".{}".format(db_type))
pe.save_to_db(
db_type=db_type,
db_destination=db_file.name,
predictor_export_meta=pem)
workspace.ResetWorkspace()
meta_net_def = pe.load_from_db(
db_type=db_type,
filename=db_file.name,
)
self.assertTrue("data" not in workspace.Blobs())
self.assertTrue("y" not in workspace.Blobs())
init_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_INIT_NET_TYPE)
        # Zero-fills the external blobs and runs extra_init_net
workspace.RunNetOnce(init_net)
self.assertTrue("data" in workspace.Blobs())
self.assertTrue("y" in workspace.Blobs())
print(workspace.FetchBlob("data"))
np.testing.assert_array_equal(
workspace.FetchBlob("data"), np.ones(shape=(1, 5)))
np.testing.assert_array_equal(
workspace.FetchBlob("y"), np.zeros(shape=(1, 10)))
self.assertTrue("global_init_blob" not in workspace.Blobs())
# Load parameters from DB
global_init_net = pred_utils.GetNet(meta_net_def,
pc.GLOBAL_INIT_NET_TYPE)
workspace.RunNetOnce(global_init_net)
# make sure the extra global_init_net is running
self.assertTrue(workspace.HasBlob('global_init_blob'))
np.testing.assert_array_equal(
workspace.FetchBlob("global_init_blob"), np.ones(shape=(1, 5)))
# Run the net with a reshaped input and verify we are
# producing good numbers (with our custom implementation)
workspace.FeedBlob("data", np.random.randn(2, 5).astype(np.float32))
predict_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_NET_TYPE)
self.assertEqual(predict_net.type, 'dag')
workspace.RunNetOnce(predict_net)
np.testing.assert_array_almost_equal(
workspace.FetchBlob("y"),
workspace.FetchBlob("data").dot(self.params["y_w"].T) +
self.params["y_b"])
def test_load_device_scope(self):
for param, value in self.params.items():
workspace.FeedBlob(param, value)
pem = pe.PredictorExportMeta(
predict_net=self.predictor_export_meta.predict_net,
parameters=self.predictor_export_meta.parameters,
inputs=self.predictor_export_meta.inputs,
outputs=self.predictor_export_meta.outputs,
shapes=self.predictor_export_meta.shapes,
net_type='dag',
)
db_type = 'minidb'
db_file = tempfile.NamedTemporaryFile(
delete=False, suffix=".{}".format(db_type))
pe.save_to_db(
db_type=db_type,
db_destination=db_file.name,
predictor_export_meta=pem)
workspace.ResetWorkspace()
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 1)):
meta_net_def = pe.load_from_db(
db_type=db_type,
filename=db_file.name,
)
init_net = core.Net(pred_utils.GetNet(meta_net_def,
pc.GLOBAL_INIT_NET_TYPE))
predict_init_net = core.Net(pred_utils.GetNet(
meta_net_def, pc.PREDICT_INIT_NET_TYPE))
# check device options
for op in list(init_net.Proto().op) + list(predict_init_net.Proto().op):
self.assertEqual(1, op.device_option.device_id)
self.assertEqual(caffe2_pb2.CPU, op.device_option.device_type)
def test_db_fails_without_params(self):
with self.assertRaises(Exception):
for db_type in ["minidb"]:
db_file = tempfile.NamedTemporaryFile(
delete=False, suffix=".{}".format(db_type))
pe.save_to_db(
db_type=db_type,
db_destination=db_file.name,
predictor_export_meta=self.predictor_export_meta)
|
pytorch-master
|
caffe2/python/predictor/predictor_exporter_test.py
|
import unittest
import numpy as np
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
class TestPredictor(unittest.TestCase):
def setUp(self):
np.random.seed(1)
self.predict_net = self._predict_net
self.init_net = self._init_net
@property
def _predict_net(self):
net = caffe2_pb2.NetDef()
net.name = 'test-predict-net'
net.external_input[:] = ['A', 'B']
net.external_output[:] = ['C']
net.op.extend([
core.CreateOperator(
'MatMul',
['A', 'B'],
['C'],
)
])
return net.SerializeToString()
@property
def _init_net(self):
net = caffe2_pb2.NetDef()
net.name = 'test-init-net'
net.external_output[:] = ['A', 'B']
net.op.extend([
core.CreateOperator(
'GivenTensorFill',
[],
['A'],
shape=(2, 3),
values=np.zeros((2, 3), np.float32).flatten().tolist(),
),
core.CreateOperator(
'GivenTensorFill',
[],
['B'],
shape=(3, 4),
values=np.zeros((3, 4), np.float32).flatten().tolist(),
),
])
return net.SerializeToString()
def test_run(self):
A = np.ones((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run([A, B])
self.assertEqual(len(outputs), 1)
np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
def test_run_map(self):
A = np.zeros((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run({
'B': B,
})
self.assertEqual(len(outputs), 1)
np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
|
pytorch-master
|
caffe2/python/predictor/predictor_test.py
|
## @package mobile_exporter
# Module caffe2.python.predictor.mobile_exporter
from caffe2.python import core, utils
from caffe2.proto import caffe2_pb2
import numpy as np
def add_tensor(net, name, blob):
''' Create an operator to store the tensor 'blob',
run the operator to put the blob to workspace.
    uint8 data is stored as a single-element array of strings.
'''
kTypeNameMapper = {
np.dtype('float32'): "GivenTensorFill",
np.dtype('int32'): "GivenTensorIntFill",
np.dtype('int64'): "GivenTensorInt64Fill",
np.dtype('uint8'): "GivenTensorByteStringToUInt8Fill",
np.dtype('O'): "GivenTensorStringFill"
}
shape = blob.shape
values = blob
# pass array of uint8 as a string to save storage
# storing uint8_t has a large overhead for now
if blob.dtype == np.dtype('uint8'):
shape = blob.shape
values = [blob.tobytes()]
# Only allow string arrays as objects.
# The only intended use case for this is to store arrays of strings in the
# model which can be used for post processing results in subsequent ops.
if blob.dtype == np.dtype('O'):
for blob_val in blob:
assert(isinstance(blob_val, bytes))
op = core.CreateOperator(
kTypeNameMapper[blob.dtype],
[], [name],
arg=[
utils.MakeArgument("shape", shape),
utils.MakeArgument("values", values),
]
)
net.op.extend([op])
def Export(workspace, net, params):
"""Returns init_net and predict_net suitable for writing to disk
and loading into a Predictor"""
proto = net if isinstance(net, caffe2_pb2.NetDef) else net.Proto()
predict_net = caffe2_pb2.NetDef()
predict_net.CopyFrom(proto)
init_net = caffe2_pb2.NetDef()
# Populate the init_net.
ssa, blob_versions = core.get_ssa(net)
inputs = []
for versioned_inputs, _ in ssa:
inputs += [name for name, _ in versioned_inputs]
input_blobs = [blob_name for blob_name, version in
blob_versions.items()
if version == 0 and blob_name not in params]
# Blobs that are never used as an input to another layer,
# i.e. strictly output blobs.
output_blobs = [blob_name for blob_name, version in
blob_versions.items()
if version != 0 and blob_name not in inputs]
for blob_ref in params:
blob_name = str(blob_ref)
blob = workspace.FetchBlob(blob_name)
add_tensor(init_net, blob_name, blob)
# We have to make sure the blob exists in the namespace
# and we can do so with fake data. (Which is immediately overwritten
# by any typical usage)
for blob_name in input_blobs:
init_net.op.extend(
[
core.CreateOperator(
"GivenTensorFill", [], [blob_name],
arg=[
utils.MakeArgument("shape", [1, 1]),
utils.MakeArgument("values", [0.0])
]
)
]
)
# Now we make input/output_blobs line up with what Predictor expects.
del predict_net.external_input[:]
new_external_inputs = input_blobs
for external_input in proto.external_input:
if external_input not in new_external_inputs:
new_external_inputs.append(external_input)
# For populating weights
predict_net.external_input.extend(new_external_inputs)
# Ensure the output is also consistent with what we want
del predict_net.external_output[:]
predict_net.external_output.extend(output_blobs)
return init_net, predict_net
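# Typical usage, following mobile_exporter_test.py (the model object and its
# input array are assumed to exist; `workspace` must be passed in explicitly):
#
#   workspace.RunNetOnce(model.param_init_net)
#   init_net, predict_net = Export(workspace, model.net, model.params)
#   predictor = workspace.Predictor(init_net.SerializeToString(),
#                                   predict_net.SerializeToString())
#   outputs = predictor.run([input_array])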
|
pytorch-master
|
caffe2/python/predictor/mobile_exporter.py
|
## @package elementwise_linear
# Module caffe2.python.helpers.elementwise_linear
from caffe2.python import core
from caffe2.python.modeling.parameter_info import ParameterTags
def _elementwise_linear(
model, op_call, blob_in, blob_out, dim,
weight_init=None, bias_init=None, **kwargs
):
"""Elementwise_Linear"""
weight_init = weight_init or ('ConstantFill', {'value': 1.0})
bias_init = bias_init or ('ConstantFill', {'value': 0.0})
blob_out = blob_out or model.net.NextName()
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_w',
shape=[dim],
**weight_init[1]
)
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_b',
shape=[dim],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_w', model.param_init_net)
bias = core.ScopedBlobReference(
blob_out + '_b', model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
model.AddParameter(bias, ParameterTags.BIAS)
return op_call([blob_in, weight, bias], blob_out, **kwargs)
def elementwise_linear(model, *args, **kwargs):
return _elementwise_linear(
model, model.net.ElementwiseLinear, *args, **kwargs)
|
pytorch-master
|
caffe2/python/helpers/elementwise_linear.py
|
## @package fc
# Module caffe2.python.helpers.fc
from caffe2.python import core
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
def _FC_or_packed_FC(
model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
bias_init=None, WeightInitializer=None, BiasInitializer=None,
enable_tensor_core=False, float16_compute=False, **kwargs
):
WeightInitializer = initializers.update_initializer(
WeightInitializer, weight_init, ("XavierFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
if not model.init_params:
WeightInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
blob_out = blob_out or model.net.NextName()
bias_tags = [ParameterTags.BIAS]
if 'freeze_bias' in kwargs:
bias_tags.append(ParameterTags.COMPUTED_PARAM)
weight = model.create_param(
param_name=blob_out + '_w',
shape=[dim_out, dim_in],
initializer=WeightInitializer,
tags=ParameterTags.WEIGHT
)
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_out, ],
initializer=BiasInitializer,
tags=bias_tags
)
# enable TensorCore by setting appropriate engine
if enable_tensor_core:
kwargs['engine'] = 'TENSORCORE'
# Enable float 16 compute kernel (relevant for CUDA)
if float16_compute:
kwargs['float16_compute'] = True
return op_call([blob_in, weight, bias], blob_out, **kwargs)
def fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs)
def packed_fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
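# These helpers are normally reached through brew, e.g. (a sketch; the blob
# names and dimensions are illustrative, taken from the LeNet test elsewhere
# in this repository):
#
#   from caffe2.python import brew
#   from caffe2.python.model_helper import ModelHelper
#   model = ModelHelper(name="example")
#   brew.fc(model, "pool2", "fc3", dim_in=50 * 4 * 4, dim_out=500)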
def fc_decomp(
model, blob_in, blob_out, dim_in, dim_out,
rank_approx=5, weight_init=None, bias_init=None,
WeightInitializer=None, BiasInitializer=None, **kwargs
):
"""FC_Decomp version
    Here we assume that the rank of the original input is bigger than 5.
"""
WeightInitializer = initializers.update_initializer(
WeightInitializer, weight_init, ("XavierFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
blob_out = blob_out or model.net.NextName()
u = model.create_param(
param_name=blob_out + '_u',
shape=[dim_out, rank_approx],
initializer=WeightInitializer,
)
v = model.create_param(
param_name=blob_out + '_v',
shape=[dim_in, rank_approx],
initializer=WeightInitializer,
)
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_out, ],
initializer=BiasInitializer,
)
return model.net.FC_Decomp([blob_in, u, v, bias], blob_out, **kwargs)
def fc_prune(
model, blob_in, blob_out, dim_in, dim_out,
weight_init=None, bias_init=None, mask_init=None,
threshold=0.00001, need_compress_rate=False,
comp_lb=0.05,
**kwargs
):
"""FC_Prune version
Runnable so far. Great!:)
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
mask_init = mask_init if mask_init else ('ConstantFill', {})
blob_out = blob_out or model.net.NextName()
compress_rate = blob_out + '_compress_rate'
if model.init_params:
compress_lb = model.param_init_net.ConstantFill(
[],
blob_out + '_lb',
shape=[1],
value=comp_lb
)
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_w',
shape=[dim_out, dim_in],
**weight_init[1]
)
mask = model.param_init_net.ConstantFill(
[],
blob_out + '_m',
shape=[dim_out, dim_in],
value=1.0
)
ag_dw = model.param_init_net.__getattr__(mask_init[0])(
[],
blob_out + '_ag_dw',
shape=[dim_out, dim_in],
**mask_init[1]
)
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_b',
shape=[dim_out, ],
**bias_init[1]
)
mask_seq = model.param_init_net.__getattr__(mask_init[0])(
[],
blob_out + '_mask_seq',
shape=[dim_out, dim_in],
**mask_init[1]
)
thres = model.param_init_net.ConstantFill(
[],
blob_out + '_thres',
shape=[1],
value=threshold
)
else:
compress_lb = core.ScopedBlobReference(
blob_out + '_lb', model.param_init_net)
weight = core.ScopedBlobReference(
blob_out + '_w', model.param_init_net)
bias = core.ScopedBlobReference(
blob_out + '_b', model.param_init_net)
mask = core.ScopedBlobReference(
blob_out + '_m', model.param_init_net)
ag_dw = core.ScopedBlobReference(
blob_out + '_ag_dw', model.param_init_net)
mask_seq = core.ScopedBlobReference(
blob_out + '_mask_seq', model.param_init_net)
thres = core.ScopedBlobReference(
blob_out + '_thres', model.param_init_net)
model.AddParameter(weight)
model.AddParameter(bias)
if need_compress_rate:
return model.net.FC_Prune([blob_in, weight, mask, bias, ag_dw, mask_seq,
thres, compress_lb],
[blob_out, compress_rate], **kwargs)
else:
return model.net.FC_Prune([blob_in, weight, mask,
bias, ag_dw, mask_seq,
thres, compress_lb],
blob_out, **kwargs)
def fc_sparse(
model, blob_in, blob_out, w_csr, iw, jw, bias,
**kwargs
):
"""FC_Sparse: Only takes in allocated weights"""
if not (w_csr and iw and jw and bias):
print("Warning...")
model.AddParameter(w_csr)
model.AddParameter(iw)
model.AddParameter(jw)
model.AddParameter(bias)
return model.net.FC_Sparse([blob_in, w_csr, iw, jw, bias],
blob_out, **kwargs)
|
pytorch-master
|
caffe2/python/helpers/fc.py
|
## @package quantization
# Module caffe2.python.helpers.quantization
def fused_8bit_rowwise_quantized_to_float(
model, blob_in, blob_out
):
"""Fused8BitRowwiseQuantizedToFloat"""
return model.net.Fused8BitRowwiseQuantizedToFloat(blob_in, blob_out)
|
pytorch-master
|
caffe2/python/helpers/quantization.py
|
## @package algebra
# Module caffe2.python.helpers.algebra
def transpose(model, blob_in, blob_out, use_cudnn=False, **kwargs):
"""Transpose."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.Transpose(blob_in, blob_out, **kwargs)
def sum(model, blob_in, blob_out, **kwargs):
"""Sum"""
return model.net.Sum(blob_in, blob_out, **kwargs)
def reduce_sum(model, blob_in, blob_out, **kwargs):
"""ReduceSum"""
return model.net.ReduceSum(blob_in, blob_out, **kwargs)
def sub(model, blob_in, blob_out, **kwargs):
"""Subtract"""
return model.net.Sub(blob_in, blob_out, **kwargs)
def mat_mul(model, blob_in, blob_out, **kwargs):
"""Matrix multiplication"""
return model.net.MatMul(blob_in, blob_out, **kwargs)
def arg_min(model, blob_in, blob_out, **kwargs):
"""ArgMin"""
return model.net.ArgMin(blob_in, blob_out, **kwargs)
def batch_mat_mul(model, blob_in, blob_out,
enable_tensor_core=False, **kwargs):
if enable_tensor_core:
kwargs['engine'] = 'TENSORCORE'
return model.net.BatchMatMul(blob_in, blob_out, **kwargs)
def sparse_lengths_sum_4bit_rowwise_sparse(model, blob_in, blob_out, **kwargs):
return model.net.SparseLengthsSum4BitRowwiseSparse(blob_in, blob_out, **kwargs)
|
pytorch-master
|
caffe2/python/helpers/algebra.py
|
## @package tools
# Module caffe2.python.helpers.tools
def image_input(
model, blob_in, blob_out, order="NCHW", use_gpu_transform=False, **kwargs
):
assert 'is_test' in kwargs, "Argument 'is_test' is required"
if order == "NCHW":
        if use_gpu_transform:
            kwargs['use_gpu_transform'] = 1 if use_gpu_transform else 0
            # GPU transform will handle NHWC -> NCHW
            outputs = model.net.ImageInput(blob_in, blob_out, **kwargs)
else:
outputs = model.net.ImageInput(
blob_in, [blob_out[0] + '_nhwc'] + blob_out[1:], **kwargs
)
outputs_list = list(outputs)
outputs_list[0] = model.net.NHWC2NCHW(outputs_list[0], blob_out[0])
outputs = tuple(outputs_list)
else:
outputs = model.net.ImageInput(blob_in, blob_out, **kwargs)
return outputs
def video_input(model, blob_in, blob_out, **kwargs):
# size of outputs can vary depending on kwargs
outputs = model.net.VideoInput(blob_in, blob_out, **kwargs)
return outputs
|
pytorch-master
|
caffe2/python/helpers/tools.py
|
## @package pooling
# Module caffe2.python.helpers.pooling
def max_pool(model, blob_in, blob_out, use_cudnn=False, order="NCHW", **kwargs):
"""Max pooling"""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.MaxPool(blob_in, blob_out, order=order, **kwargs)
def average_pool(model, blob_in, blob_out, use_cudnn=False, order="NCHW",
**kwargs):
"""Average pooling"""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.AveragePool(
blob_in,
blob_out,
order=order,
**kwargs
)
def max_pool_with_index(model, blob_in, blob_out, order="NCHW", **kwargs):
"""Max pooling with an explicit index of max position"""
return model.net.MaxPoolWithIndex(
blob_in,
[blob_out, blob_out + "_index"],
order=order,
**kwargs
)[0]
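# Brew-level usage sketch (blob names and kernel sizes are illustrative; the
# max_pool call mirrors the LeNet test elsewhere in this repository):
#
#   brew.max_pool(model, "conv1", "pool1", kernel=2, stride=2)
#   brew.average_pool(model, "conv1", "pool1_avg", kernel=2, stride=2)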
|
pytorch-master
|
caffe2/python/helpers/pooling.py
|
pytorch-master
|
caffe2/python/helpers/__init__.py
|
|
## @package array_helpers
# Module caffe2.python.helpers.array_helpers
def concat(model, blobs_in, blob_out, **kwargs):
"""Depth Concat."""
if kwargs.get('order') and kwargs.get('axis'):
# The backend throws an error if both are given
kwargs.pop('order')
return model.net.Concat(
blobs_in,
[blob_out, "_" + blob_out + "_concat_dims"],
**kwargs
)[0]
def depth_concat(model, blobs_in, blob_out, **kwargs):
"""The old depth concat function - we should move to use concat."""
print("DepthConcat is deprecated. use Concat instead.")
    return concat(model, blobs_in, blob_out, **kwargs)
|
pytorch-master
|
caffe2/python/helpers/array_helpers.py
|
## @package nonlinearity
# Module caffe2.python.helpers.nonlinearity
from caffe2.python import core
def prelu(model, blob_in, blob_out, num_channels=1, slope_init=None,
**kwargs):
"""PRelu"""
slope_init = (
slope_init if slope_init else ('ConstantFill', {'value': 0.25}))
if model.init_params:
slope = model.param_init_net.__getattr__(slope_init[0])(
[],
blob_out + '_slope',
shape=[num_channels],
**slope_init[1]
)
else:
slope = core.ScopedBlobReference(
blob_out + '_slope', model.param_init_net)
model.AddParameter(slope)
return model.net.PRelu([blob_in, slope], [blob_out])
def relu(model, blob_in, blob_out, use_cudnn=False, order="NCHW", **kwargs):
"""Relu."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.Relu(blob_in, blob_out, order=order, **kwargs)
def tanh(model, blob_in, blob_out, use_cudnn=False, order="NCHW", **kwargs):
"""Tanh."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.Tanh(blob_in, blob_out, order=order, **kwargs)
|
pytorch-master
|
caffe2/python/helpers/nonlinearity.py
|
## @package train
# Module caffe2.python.helpers.train
from caffe2.python import core, scope
from caffe2.proto import caffe2_pb2
def _get_weights(model, namescope=None):
if namescope is None:
namescope = scope.CurrentNameScope()
if namescope == '':
return model.weights[:]
else:
return [w for w in model.weights if w.GetNameScope() == namescope]
def iter(model, blob_out, **kwargs):
if 'device_option' in kwargs:
del kwargs['device_option']
model.param_init_net.ConstantFill(
[],
blob_out,
shape=[1],
value=0,
dtype=core.DataType.INT64,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0),
**kwargs
)
return model.net.Iter(blob_out, blob_out, **kwargs)
def accuracy(model, blob_in, blob_out, **kwargs):
dev = kwargs['device_option'] if 'device_option' in kwargs \
else scope.CurrentDeviceScope()
is_cpu = dev is None or dev.device_type == caffe2_pb2.CPU
# We support top_k > 1 only on CPU
if not is_cpu and 'top_k' in kwargs and kwargs['top_k'] > 1:
pred_host = model.net.CopyGPUToCPU(blob_in[0], blob_in[0] + "_host")
label_host = model.net.CopyGPUToCPU(blob_in[1], blob_in[1] + "_host")
# Now use the Host version of the accuracy op
model.net.Accuracy(
[pred_host, label_host],
blob_out,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0),
**kwargs
)
else:
        model.net.Accuracy(blob_in, blob_out, **kwargs)
def add_weight_decay(model, weight_decay):
"""Adds a decay to weights in the model.
This is a form of L2 regularization.
Args:
weight_decay: strength of the regularization
"""
if weight_decay <= 0.0:
return
wd = model.param_init_net.ConstantFill(
[], 'wd', shape=[1], value=weight_decay
)
ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
for param in _get_weights(model):
# Equivalent to: grad += wd * param
grad = model.param_to_grad[param]
model.net.WeightedSum(
[grad, ONE, param, wd],
grad,
)
|
pytorch-master
|
caffe2/python/helpers/train.py
|
## @package control_ops
# Module caffe2.python.helpers.control_ops
from caffe2.python.control_ops_util import add_if_op, add_while_op
def cond(model, cond_blob, external_blobs, then_model, else_model=None):
"""Condition"""
add_if_op(
model.net,
cond_blob,
external_blobs,
then_model.net,
else_model.net if else_model else None)
def loop(model, cond_blob, external_blobs, loop_model, cond_model=None):
"""Loop"""
add_while_op(
model.net,
cond_blob,
external_blobs,
loop_model.net,
cond_model.net if cond_model else None)
|
pytorch-master
|
caffe2/python/helpers/control_ops.py
|
## @package dropout
# Module caffe2.python.helpers.dropout
def dropout(model, blob_in, blob_out, use_cudnn=False, **kwargs):
"""dropout"""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
else:
kwargs['engine'] = 'DEFAULT'
assert 'is_test' in kwargs, "Argument 'is_test' is required"
return model.net.Dropout(
blob_in, [blob_out, "_" + blob_out + "_mask"], **kwargs)[0]
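# Brew-level usage sketch; 'is_test' is mandatory, and the blob names and
# ratio shown here are illustrative:
#
#   brew.dropout(model, "fc3", "fc3_drop", ratio=0.5, is_test=0)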
|
pytorch-master
|
caffe2/python/helpers/dropout.py
|
## @package conv
# Module caffe2.python.helpers.conv
from caffe2.python import core
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
def _ConvBase(
model,
is_nd,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
float16_compute=False,
**kwargs
):
kernels = []
if is_nd:
if not isinstance(kernel, list):
kernels = [kernel]
else:
kernels = kernel
else:
if isinstance(kernel, list):
assert len(kernel) == 2, "Conv support only a 2D kernel."
kernels = kernel
else:
kernels = [kernel] * 2
requested_engine = kwargs.get('engine')
if requested_engine is not None:
if use_cudnn and requested_engine != 'CUDNN':
raise ValueError(
'When use_cudnn=True, the only engine you can specify is '
'"CUDNN"')
elif not use_cudnn and requested_engine == 'CUDNN':
raise ValueError(
'When use_cudnn=False, the only engine you can specify is '
'""')
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
use_bias =\
False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
blob_out = blob_out or model.net.NextName()
weight_shape = [dim_out]
if order == "NCHW":
weight_shape.append(int(dim_in / group))
weight_shape.extend(kernels)
else:
weight_shape.extend(kernels)
weight_shape.append(int(dim_in / group))
WeightInitializer = initializers.update_initializer(
WeightInitializer, weight_init, ("XavierFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
if not model.init_params:
WeightInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
weight = model.create_param(
param_name=blob_out + '_w',
shape=weight_shape,
initializer=WeightInitializer,
tags=ParameterTags.WEIGHT
)
if use_bias:
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_out, ],
initializer=BiasInitializer,
tags=ParameterTags.BIAS
)
if use_bias:
inputs = [blob_in, weight, bias]
else:
inputs = [blob_in, weight]
if transform_inputs is not None:
transform_inputs(model, blob_out, inputs)
# Enable float 16 compute kernel (relevant for CUDA)
if float16_compute:
kwargs['float16_compute'] = True
# For the operator, we no longer need to provide the no_bias field
# because it can automatically figure this out from the number of
# inputs.
if 'no_bias' in kwargs:
del kwargs['no_bias']
if group != 1:
kwargs['group'] = group
if is_nd:
return model.net.Conv(
inputs,
blob_out,
kernels=kernels,
order=order,
**kwargs)
else:
if isinstance(kernel, list):
return model.net.Conv(
inputs,
blob_out,
kernel_h=kernel[0],
kernel_w=kernel[1],
order=order,
**kwargs)
else:
return model.net.Conv(
inputs,
blob_out,
kernel=kernel,
order=order,
**kwargs)
def conv_nd(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
order="NCHW",
**kwargs
):
"""N-dimensional convolution for inputs with NCHW storage order.
"""
assert order == "NCHW", "ConvNd only supported for NCHW storage."
return _ConvBase(model, True, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init, bias_init, WeightInitializer, BiasInitializer,
group, transform_inputs, order=order, **kwargs)
def conv(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
**kwargs
):
"""2-dimensional convolution.
"""
return _ConvBase(model, False, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init, bias_init, WeightInitializer, BiasInitializer,
group, transform_inputs, **kwargs)
def conv_transpose(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
"""ConvTranspose.
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
blob_out = blob_out or model.net.NextName()
weight_shape = (
[dim_in, dim_out, kernel, kernel]
if order == "NCHW" else [dim_in, kernel, kernel, dim_out]
)
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_w',
shape=weight_shape,
**weight_init[1]
)
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_b',
shape=[dim_out, ],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_w', model.param_init_net)
bias = core.ScopedBlobReference(
blob_out + '_b', model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
model.AddParameter(bias, ParameterTags.BIAS)
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
return model.net.ConvTranspose(
[blob_in, weight, bias],
blob_out,
kernel=kernel,
order=order,
**kwargs
)
def group_conv(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
group=1,
**kwargs
):
"""Group Convolution.
This is essentially the same as Conv with a group argument passed in.
We specialize this for backward interface compatibility.
"""
return conv(model, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init=weight_init, bias_init=bias_init,
group=group, **kwargs)
def group_conv_deprecated(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
group=1,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
"""GroupConvolution's deprecated interface.
This is used to simulate a group convolution via split and concat. You
should always use the new group convolution in your new code.
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
use_bias = False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
if dim_in % group:
raise ValueError("dim_in should be divisible by group.")
if dim_out % group:
raise ValueError("dim_out should be divisible by group.")
splitted_blobs = model.net.DepthSplit(
blob_in,
['_' + blob_out + '_gconv_split_' + str(i) for i in range(group)],
dimensions=[int(dim_in / group) for i in range(group)],
order=order
)
weight_shape = (
[dim_out / group, dim_in / group, kernel, kernel]
if order == "NCHW" else
[dim_out / group, kernel, kernel, dim_in / group]
)
    # Make sure the shape values are ints, especially under Python 3 where
    # true division yields floats.
weight_shape = [int(v) for v in weight_shape]
conv_blobs = []
for i in range(group):
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_gconv_%d_w' % i,
shape=weight_shape,
**weight_init[1]
)
if use_bias:
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_gconv_%d_b' % i,
shape=[int(dim_out / group)],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_gconv_%d_w' % i, model.param_init_net)
if use_bias:
bias = core.ScopedBlobReference(
blob_out + '_gconv_%d_b' % i, model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
if use_bias:
model.AddParameter(bias, ParameterTags.BIAS)
if use_bias:
inputs = [weight, bias]
else:
inputs = [weight]
if 'no_bias' in kwargs:
del kwargs['no_bias']
conv_blobs.append(
splitted_blobs[i].Conv(
inputs,
blob_out + '_gconv_%d' % i,
kernel=kernel,
order=order,
**kwargs
)
)
concat, concat_dims = model.net.Concat(
conv_blobs,
[blob_out,
"_" + blob_out + "_concat_dims"],
order=order
)
return concat
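# --- Hedged usage sketch (not part of the original helpers) ---
# Builds a single 3x3 convolution with the conv() helper above. The model,
# blob names, and channel counts are illustrative assumptions.
if __name__ == "__main__":
    from caffe2.python.model_helper import ModelHelper
    example_model = ModelHelper(name="conv_example")
    # 3 input channels -> 16 output channels, stride 1 with 'same' padding.
    conv(example_model, "data", "conv1", dim_in=3, dim_out=16, kernel=3,
         stride=1, pad=1)
    print(example_model.net.Proto())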
|
pytorch-master
|
caffe2/python/helpers/conv.py
|
import contextlib
import copy
import threading
_threadlocal_scope = threading.local()
@contextlib.contextmanager
def arg_scope(single_helper_or_list, **kwargs):
global _threadlocal_scope
if not isinstance(single_helper_or_list, list):
assert callable(single_helper_or_list), \
"arg_scope is only supporting single or a list of helper functions."
single_helper_or_list = [single_helper_or_list]
old_scope = copy.deepcopy(get_current_scope())
for helper in single_helper_or_list:
assert callable(helper), \
"arg_scope is only supporting a list of callable helper functions."
helper_key = helper.__name__
if helper_key not in old_scope:
_threadlocal_scope.current_scope[helper_key] = {}
_threadlocal_scope.current_scope[helper_key].update(kwargs)
yield
_threadlocal_scope.current_scope = old_scope
def get_current_scope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "current_scope"):
_threadlocal_scope.current_scope = {}
return _threadlocal_scope.current_scope
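# --- Hedged usage sketch (not part of the original module) ---
# Demonstrates the scoping mechanics only: kwargs registered for a helper are
# visible through get_current_scope() inside the "with" block and restored on
# exit. Helpers that honor the scope (for example caffe2's brew wrappers) read
# their default kwargs from this dictionary.
if __name__ == "__main__":
    def my_helper(model, blob_in, blob_out, **kwargs):
        return kwargs  # stand-in helper; a real one would add ops to model.net
    with arg_scope(my_helper, is_test=1):
        assert get_current_scope()["my_helper"] == {"is_test": 1}
    assert "my_helper" not in get_current_scope()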
|
pytorch-master
|
caffe2/python/helpers/arg_scope.py
|
## @package normalization
# Module caffe2.python.helpers.normalization
from caffe2.python import scope
from caffe2.python.modeling.parameter_info import ParameterTags
from caffe2.proto import caffe2_pb2
from caffe2.python.modeling import initializers
def lrn(model, blob_in, blob_out, order="NCHW", use_cudnn=False, **kwargs):
"""LRN"""
dev = kwargs['device_option'] if 'device_option' in kwargs \
else scope.CurrentDeviceScope()
is_cpu = dev is None or dev.device_type == caffe2_pb2.CPU
if use_cudnn and (not is_cpu):
kwargs['engine'] = 'CUDNN'
blobs_out = blob_out
else:
blobs_out = [blob_out, "_" + blob_out + "_scale"]
lrn = model.net.LRN(
blob_in,
blobs_out,
order=order,
**kwargs
)
if use_cudnn and (not is_cpu):
return lrn
else:
return lrn[0]
def softmax(model, blob_in, blob_out=None, use_cudnn=False, **kwargs):
"""Softmax."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
if blob_out is not None:
return model.net.Softmax(blob_in, blob_out, **kwargs)
else:
return model.net.Softmax(blob_in, **kwargs)
def instance_norm(model, blob_in, blob_out, dim_in, order="NCHW", **kwargs):
blob_out = blob_out or model.net.NextName()
# Input: input, scale, bias
# Output: output, saved_mean, saved_inv_std
# scale: initialize with ones
# bias: initialize with zeros
def init_blob(value, suffix):
return model.param_init_net.ConstantFill(
[], blob_out + "_" + suffix, shape=[dim_in], value=value)
scale, bias = init_blob(1.0, "s"), init_blob(0.0, "b")
model.AddParameter(scale, ParameterTags.WEIGHT)
model.AddParameter(bias, ParameterTags.BIAS)
blob_outs = [blob_out, blob_out + "_sm", blob_out + "_siv"]
if 'is_test' in kwargs and kwargs['is_test']:
blob_outputs = model.net.InstanceNorm(
[blob_in, scale, bias], [blob_out],
order=order, **kwargs)
return blob_outputs
else:
blob_outputs = model.net.InstanceNorm(
[blob_in, scale, bias], blob_outs,
order=order, **kwargs)
# Return the output
return blob_outputs[0]
def spatial_bn(model, blob_in, blob_out, dim_in,
init_scale=1., init_bias=0.,
ScaleInitializer=None, BiasInitializer=None,
RunningMeanInitializer=None, RunningVarianceInitializer=None,
order="NCHW", **kwargs):
blob_out = blob_out or model.net.NextName()
# Input: input, scale, bias, est_mean, est_inv_var
# Output: output, running_mean, running_inv_var, saved_mean,
# saved_inv_var
# scale: initialize with init_scale (default 1.)
# bias: initialize with init_bias (default 0.)
# est mean: zero
# est var: ones
if model.init_params:
scale_init = ("ConstantFill", {'value': init_scale})
bias_init = ("ConstantFill", {'value': init_bias})
rm_init = ("ConstantFill", {'value': 0.0})
riv_init = ("ConstantFill", {'value': 1.0})
ScaleInitializer = initializers.update_initializer(
ScaleInitializer, scale_init, ("ConstantFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
RunningMeanInitializer = initializers.update_initializer(
RunningMeanInitializer, rm_init, ("ConstantFill", {})
)
RunningVarianceInitializer = initializers.update_initializer(
RunningVarianceInitializer, riv_init, ("ConstantFill", {})
)
else:
ScaleInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
RunningMeanInitializer = initializers.ExternalInitializer()
RunningVarianceInitializer = initializers.ExternalInitializer()
scale = model.create_param(
param_name=blob_out + '_s',
shape=[dim_in],
initializer=ScaleInitializer,
tags=ParameterTags.WEIGHT
)
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_in],
initializer=BiasInitializer,
tags=ParameterTags.BIAS
)
running_mean = model.create_param(
param_name=blob_out + '_rm',
shape=[dim_in],
initializer=RunningMeanInitializer,
tags=ParameterTags.COMPUTED_PARAM
)
running_inv_var = model.create_param(
param_name=blob_out + '_riv',
shape=[dim_in],
initializer=RunningVarianceInitializer,
tags=ParameterTags.COMPUTED_PARAM
)
blob_outs = [blob_out, running_mean, running_inv_var,
blob_out + "_sm", blob_out + "_siv"]
if 'is_test' in kwargs and kwargs['is_test']:
blob_outputs = model.net.SpatialBN(
[blob_in, scale, bias, blob_outs[1], blob_outs[2]], [blob_out],
order=order, **kwargs)
return blob_outputs
else:
blob_outputs = model.net.SpatialBN(
[blob_in, scale, bias, blob_outs[1], blob_outs[2]], blob_outs,
order=order, **kwargs)
# Return the output
return blob_outputs[0]
def spatial_gn(model, blob_in, blob_out, dim_in,
init_scale=1., init_bias=0.,
ScaleInitializer=None, BiasInitializer=None,
RunningMeanInitializer=None, RunningVarianceInitializer=None,
order="NCHW", **kwargs):
'''
Group normalizes the input, cf. https://arxiv.org/abs/1803.08494.
'''
blob_out = blob_out or model.net.NextName()
# Input: input, scale, bias
# Output: output, group_mean, group_inv_std
# scale: initialize with init_scale (default 1.)
# [recommendation: set init_scale = 0. in the last layer for each res block]
# bias: initialize with init_bias (default 0.)
if model.init_params:
scale_init = ("ConstantFill", {'value': init_scale})
bias_init = ("ConstantFill", {'value': init_bias})
ScaleInitializer = initializers.update_initializer(
ScaleInitializer, scale_init, ("ConstantFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
else:
ScaleInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
scale = model.create_param(
param_name=blob_out + '_s',
shape=[dim_in],
initializer=ScaleInitializer,
tags=ParameterTags.WEIGHT
)
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_in],
initializer=BiasInitializer,
tags=ParameterTags.BIAS
)
blob_outs = [blob_out,
blob_out + "_mean", blob_out + "_std"]
blob_outputs = model.net.GroupNorm(
[blob_in, scale, bias],
blob_outs,
**kwargs)
# Return the output
return blob_outputs[0]
def layer_norm(
model,
blob_in,
blob_out,
dim_in,
axis=1,
epsilon=1e-4,
initial_scale=1.0,
initial_bias=0.0,
):
'''
Layer normalizes the input, cf. https://arxiv.org/pdf/1607.06450.pdf.
Args:
blob_in: The input blob to layer normalize.
blob_out: The layer normalized output blob.
dim_in: The dimension of the scale and bias. For example, if blob_in is
a 2D design matrix and axis is 1, this would be the number of
columns.
axis: (optional) The axis to normalize. Typically the feature axis.
Defaults to 1.
epsilon: (optional) A small value used for numerical stability in
calculation. Defaults to 1e-4.
initial_scale: (optional) The initial value for the learned scale
parameter. Defaults to 1.0
initial_bias: (optional) The initial value for the learned bias
parameter of the layerwise standard deviation. Defaults to 0.0.
Returns:
A 3-tuple consisting of:
- The layer normalized input blob.
- The mean of the input blob across the given axis.
        - The standard deviation of the input blob across the given axis.
'''
# The learned multiplicative scale or "gain".
scale = model.create_param(
param_name='{}_scale'.format(blob_out),
shape=[dim_in] if isinstance(dim_in, int) else dim_in,
initializer=initializers.Initializer(
'ConstantFill',
value=initial_scale,
),
tags=ParameterTags.WEIGHT,
)
# The learned additive bias or "shift".
bias = model.create_param(
param_name='{}_bias'.format(blob_out),
shape=[dim_in] if isinstance(dim_in, int) else dim_in,
initializer=initializers.Initializer(
'ConstantFill',
value=initial_bias,
),
tags=ParameterTags.BIAS,
)
normalized, mean, std = model.net.LayerNorm(
[blob_in, scale, bias],
[blob_out, blob_out + "_mean", blob_out + "_std"],
axis=axis,
epsilon=epsilon,
elementwise_affine=True,
)
return normalized, mean, std
def moments_with_running_stats(model, blob_in, blob_out, dim_in,
RunningMeanInitializer=None, RunningVarianceInitializer=None,
order="NCHW", **kwargs):
if model.init_params:
rm_init = ("ConstantFill", {'value': 0.0})
riv_init = ("ConstantFill", {'value': 1.0})
RunningMeanInitializer = initializers.update_initializer(
RunningMeanInitializer, rm_init, ("ConstantFill", {})
)
RunningVarianceInitializer = initializers.update_initializer(
RunningVarianceInitializer, riv_init, ("ConstantFill", {})
)
else:
RunningMeanInitializer = initializers.ExternalInitializer()
RunningVarianceInitializer = initializers.ExternalInitializer()
running_mean = model.create_param(
param_name=blob_out + '_rm',
shape=[dim_in],
initializer=RunningMeanInitializer,
tags=ParameterTags.COMPUTED_PARAM
)
# this is just running variance
running_inv_var = model.create_param(
param_name=blob_out + '_riv',
shape=[dim_in],
initializer=RunningVarianceInitializer,
tags=ParameterTags.COMPUTED_PARAM
)
blob_outs = [blob_out + "_sm", blob_out + "_sv"]
if order == 'NCHW':
blob_outputs = model.net.Moments(
[blob_in], blob_outs,
axes=[0, 2, 3],
order=order, keepdims=False, **kwargs)
elif order == 'NHWC':
blob_outputs = model.net.Moments(
[blob_in], blob_outs,
axes=[0, 1, 2],
order=order, keepdims=False, **kwargs)
return blob_outputs
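# --- Hedged usage sketch (not part of the original helpers) ---
# Wires a training-mode spatial batch norm and a layer norm onto a ModelHelper.
# The model, blob names, and channel counts are illustrative assumptions.
if __name__ == "__main__":
    from caffe2.python.model_helper import ModelHelper
    example_model = ModelHelper(name="normalization_example")
    # Batch norm over 64 channels of an NCHW activation blob.
    spatial_bn(example_model, "conv1", "conv1_bn", dim_in=64,
               epsilon=1e-5, is_test=False)
    # Layer norm over the feature axis of a 2D blob with 256 columns.
    normalized, mean, std = layer_norm(example_model, "fc1", "fc1_ln",
                                       dim_in=256)
    print(example_model.net.Proto())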
|
pytorch-master
|
caffe2/python/helpers/normalization.py
|
## @package db_input
# Module caffe2.python.helpers.db_input
def db_input(model, blobs_out, batch_size, db, db_type):
dbreader_name = "dbreader_" + db
dbreader = model.param_init_net.CreateDB(
[],
dbreader_name,
db=db,
db_type=db_type,
)
return model.net.TensorProtosDBInput(
dbreader, blobs_out, batch_size=batch_size)
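# --- Hedged usage sketch (not part of the original helper) ---
# Assumes a TensorProtos database already exists at the path below; the path,
# db_type, and blob names are illustrative, not files shipped with caffe2.
if __name__ == "__main__":
    from caffe2.python.model_helper import ModelHelper
    example_model = ModelHelper(name="db_input_example")
    data, label = db_input(example_model, ["data", "label"], batch_size=32,
                           db="/tmp/train_data.minidb", db_type="minidb")
    print(example_model.net.Proto())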
|
pytorch-master
|
caffe2/python/helpers/db_input.py
|
pytorch-master
|
caffe2/python/rnn/__init__.py
|
|
from caffe2.python import workspace, scope
from caffe2.python.model_helper import ModelHelper
import numpy as np
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
def tanh(x):
return 2.0 * sigmoid(2.0 * x) - 1
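# Illustrative check of the identity above (not part of the original file):
# tanh(x) == 2 * sigmoid(2 * x) - 1, so
# np.allclose(tanh(np.linspace(-3, 3, 7)), np.tanh(np.linspace(-3, 3, 7)))
# holds for this NumPy reference implementation.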
def _prepare_rnn(
t, n, dim_in, create_rnn, outputs_with_grads,
forget_bias, memory_optim=False,
forward_only=False, drop_states=False, T=None,
two_d_initial_states=None, dim_out=None,
num_states=2,
**kwargs
):
if dim_out is None:
dim_out = [dim_in]
print("Dims: ", t, n, dim_in, dim_out)
model = ModelHelper(name='external')
if two_d_initial_states is None:
two_d_initial_states = np.random.randint(2)
def generate_input_state(n, d):
if two_d_initial_states:
return np.random.randn(n, d).astype(np.float32)
else:
return np.random.randn(1, n, d).astype(np.float32)
states = []
for layer_id, d in enumerate(dim_out):
for i in range(num_states):
state_name = "state_{}/layer_{}".format(i, layer_id)
states.append(model.net.AddExternalInput(state_name))
workspace.FeedBlob(
states[-1], generate_input_state(n, d).astype(np.float32))
# Due to convoluted RNN scoping logic we make sure that things
# work from a namescope
with scope.NameScope("test_name_scope"):
input_blob, seq_lengths = model.net.AddScopedExternalInputs(
'input_blob', 'seq_lengths')
outputs = create_rnn(
model, input_blob, seq_lengths, states,
dim_in=dim_in, dim_out=dim_out, scope="external/recurrent",
outputs_with_grads=outputs_with_grads,
memory_optimization=memory_optim,
forget_bias=forget_bias,
forward_only=forward_only,
drop_states=drop_states,
static_rnn_unroll_size=T,
**kwargs
)
workspace.RunNetOnce(model.param_init_net)
workspace.FeedBlob(
seq_lengths,
np.random.randint(1, t + 1, size=(n,)).astype(np.int32)
)
return outputs, model.net, states + [input_blob]
|
pytorch-master
|
caffe2/python/rnn/rnn_cell_test_util.py
|
from caffe2.python import workspace, core, lstm_benchmark, utils
from copy import copy
@utils.debug
def Compare(args):
results = []
num_iters = 1000
args.gpu = True
with core.DeviceScope(core.DeviceOption(workspace.GpuDeviceType, 0)):
for batch_size in [64, 128, 256]:
for seq_length in [20, 100]:
for hidden_dim in [40, 100, 400, 800]:
args.batch_size = batch_size
args.seq_length = seq_length
args.hidden_dim = hidden_dim
args.data_size = batch_size * seq_length * num_iters
args.iters_to_report = num_iters // 3
args.implementation = 'own'
t_own = lstm_benchmark.Benchmark(args)
workspace.ResetWorkspace()
args.implementation = 'cudnn'
t_cudnn = lstm_benchmark.Benchmark(args)
workspace.ResetWorkspace()
results.append((copy(args), float(t_own), float(t_cudnn)))
print(args)
print("t_cudnn / t_own: {}".format(t_cudnn / t_own))
for args, t_own, t_cudnn in results:
print("{}: cudnn time: {}, own time: {}, ratio: {}".format(
str(args), t_cudnn, t_own, t_cudnn / t_own))
ratio_sum = 0
for args, t_own, t_cudnn in results:
ratio = float(t_cudnn) / t_own
ratio_sum += ratio
print("hidden_dim: {}, seq_lengths: {}, batch_size: {}, num_layers: {}:"
" cudnn time: {}, own time: {}, ratio: {}".format(
args.hidden_dim, args.seq_length, args.batch_size,
args.num_layers, t_cudnn, t_own, ratio))
print("Ratio average: {}".format(ratio_sum / len(results)))
if __name__ == '__main__':
args = lstm_benchmark.GetArgumentParser().parse_args()
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_gpu_memory_tracking=1'])
Compare(args)
|
pytorch-master
|
caffe2/python/rnn/lstm_comparison.py
|
import os
import uuid
from caffe2.distributed.python import StoreHandlerTimeoutError
from caffe2.distributed.store_ops_test_util import StoreOpsTests
from caffe2.python import core, workspace, dyndep
from caffe2.python.test_util import TestCase
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:redis_store_handler_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops")
class TestRedisStoreHandlerOp(TestCase):
def setUp(self):
super(TestRedisStoreHandlerOp, self).setUp()
self.uuid = str(uuid.uuid4()) + "/"
def tearDown(self):
super(TestRedisStoreHandlerOp, self).tearDown()
def create_store_handler(self):
store_handler = "store_handler"
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate",
[],
[store_handler],
prefix=self.uuid,
host=os.getenv("REDIS_HOST", "localhost"),
port=int(os.getenv("REDIS_PORT", 6379))))
return store_handler
def test_set_get(self):
StoreOpsTests.test_set_get(self.create_store_handler)
def test_get_timeout(self):
with self.assertRaises(StoreHandlerTimeoutError):
StoreOpsTests.test_get_timeout(self.create_store_handler)
|
pytorch-master
|
caffe2/distributed/redis_store_handler_op_test.py
|
pytorch-master
|
caffe2/distributed/__init__.py
|
|
## @package store_ops_test_util
# Module caffe2.distributed.store_ops_test_util
from multiprocessing import Process, Queue
import numpy as np
from caffe2.python import core, workspace
class StoreOpsTests(object):
@classmethod
def _test_set_get(cls, queue, create_store_handler_fn, index, num_procs):
store_handler = create_store_handler_fn()
blob = "blob"
value = np.full(1, 1, np.float32)
# Use last process to set blob to make sure other processes
# are waiting for the blob before it is set.
if index == (num_procs - 1):
workspace.FeedBlob(blob, value)
workspace.RunOperatorOnce(
core.CreateOperator(
"StoreSet",
[store_handler, blob],
[],
blob_name=blob))
output_blob = "output_blob"
workspace.RunOperatorOnce(
core.CreateOperator(
"StoreGet",
[store_handler],
[output_blob],
blob_name=blob))
try:
np.testing.assert_array_equal(workspace.FetchBlob(output_blob), 1)
except AssertionError as err:
queue.put(err)
workspace.ResetWorkspace()
@classmethod
def test_set_get(cls, create_store_handler_fn):
# Queue for assertion errors on subprocesses
queue = Queue()
# Start N processes in the background
num_procs = 4
procs = []
for index in range(num_procs):
proc = Process(
target=cls._test_set_get,
args=(queue, create_store_handler_fn, index, num_procs, ))
proc.start()
procs.append(proc)
# Test complete, join background processes
for proc in procs:
proc.join()
# Raise first error we find, if any
if not queue.empty():
raise queue.get()
@classmethod
def test_get_timeout(cls, create_store_handler_fn):
store_handler = create_store_handler_fn()
net = core.Net('get_missing_blob')
net.StoreGet([store_handler], 1, blob_name='blob')
workspace.RunNetOnce(net)
|
pytorch-master
|
caffe2/distributed/store_ops_test_util.py
|
import errno
import os
import tempfile
import shutil
from caffe2.distributed.python import StoreHandlerTimeoutError
from caffe2.distributed.store_ops_test_util import StoreOpsTests
from caffe2.python import core, workspace, dyndep
from caffe2.python.test_util import TestCase
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:file_store_handler_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops")
class TestFileStoreHandlerOp(TestCase):
testCounter = 0
def setUp(self):
super(TestFileStoreHandlerOp, self).setUp()
self.tmpdir = tempfile.mkdtemp()
# Use counter to tell test cases apart
TestFileStoreHandlerOp.testCounter += 1
def tearDown(self):
shutil.rmtree(self.tmpdir)
super(TestFileStoreHandlerOp, self).tearDown()
def create_store_handler(self):
# Use new path for every test so they are isolated
path = self.tmpdir + "/" + str(TestFileStoreHandlerOp.testCounter)
# Ensure path exists (including counter)
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
store_handler = "store_handler"
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate",
[],
[store_handler],
path=path))
return store_handler
def test_set_get(self):
StoreOpsTests.test_set_get(self.create_store_handler)
def test_get_timeout(self):
with self.assertRaises(StoreHandlerTimeoutError):
StoreOpsTests.test_get_timeout(self.create_store_handler)
|
pytorch-master
|
caffe2/distributed/file_store_handler_op_test.py
|
pytorch-master
|
caffe2/perfkernels/__init__.py
|
|
import argparse
import sys
sizeof = {"float": 4, "at::Half": 2, "uint8_t": 1}
def unroll(uf, IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
def compute(regid, InType, use_weights, isa, prefetch):
code = []
if InType == "float":
code.append(
" vop%d = _mm256_fmadd_ps(vwgt, _mm256_loadu_ps(ip + (%d)), vop%d);" # noqa
% (regid, regid, regid)
)
elif InType == "at::Half":
code.append(
" vop%d = _mm256_fmadd_ps(\n"
" vwgt,\n"
" _mm256_cvtph_ps(\n"
" _mm_loadu_si128(reinterpret_cast<const __m128i*>(ip + (%d)))),\n" # noqa
" vop%d);" % (regid, regid, regid)
)
elif InType == "uint8_t":
code.append(
" vop%d = _mm256_fmadd_ps(\n"
" vwgt,\n"
" _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(\n"
" _mm_loadl_epi64(reinterpret_cast<const __m128i*>(ip + (%d))))),\n" # noqa
" _mm256_add_ps(vop%d, vbio));" % (regid, regid, regid)
)
else:
assert False
if prefetch:
code.append(
" _mm_prefetch(\n"
" reinterpret_cast<const char*>(&ip_next_T0[%d]), _MM_HINT_T0);"
% (regid)
)
else:
code.append(
" // skip unnecessary prefetch of (&ip_next_T0[%d])" % (regid)
)
return code
code = []
code.append(" // unrolling " + str(uf) + " times")
if use_offsets:
code.append(
" for ("
+ IndexType
+ " rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {"
)
else:
code.append(
" for ("
+ IndexType
+ " rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {"
)
code.append(" " + OutType + "* op = &out[rangeIndex * block_size];")
for i in range(0, uf):
j = 8 * i
code.append(" __m256 vop" + str(j) + " = _mm256_setzero_ps();")
# inner loop
if use_offsets:
code.append(
" if (dataInd != offsets[rangeIndex] - offsets[0]) {\n"
+ " return false;\n"
+ " }"
)
code.append("""\
int64_t end_offset = offsets[rangeIndex + 1];
int64_t length = end_offset - offsets[rangeIndex];""")
code.append(
" for ("
+ "int64_t"
+ " start = dataInd; dataInd < end_offset - offsets[0];\n ++dataInd) {" # noqa
)
else:
code.append(
" if (dataInd + lengths[rangeIndex] > index_size) {\n"
+ " return false;\n"
+ " }"
)
code.append(
" for ("
+ IndexType
+ " start = dataInd; dataInd < start + lengths[rangeIndex];\n ++dataInd) {" # noqa
)
code.append(" const " + IndexType + " idx = indices[dataInd];")
code.append(
" if (idx < 0 || idx >= data_size) {\n"
+ " return false;\n"
+ " }"
)
if InType == "uint8_t":
code.append(" " + OutType + " wgt = 1.f;")
code.append(" " + OutType + " bio;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];" # noqa
)
code.append(" }")
if fused:
code.append(
" const float* scale_bias = reinterpret_cast<const float*>(\n"
" &input[idx * fused_block_size + block_size]);"
)
code.append(" bio = wgt * scale_bias[1];")
code.append(" wgt = wgt * scale_bias[0];")
else:
code.append(" bio = wgt * scale_bias[2 * idx + 1];")
code.append(" wgt = wgt * scale_bias[2 * idx];")
code.append(" __m256 vbio = _mm256_set1_ps(bio);")
else:
code.append(" " + OutType + " wgt = 1.f;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];" # noqa
)
code.append(" }")
code.append(" __m256 vwgt = _mm256_set1_ps(wgt);")
code.append(" const {}* ip = &input[idx * fused_block_size];".format(InType))
code.append(
" const {} next_T0 = (dataInd < index_size - prefdist_T0)\n"
" ? (dataInd + prefdist_T0)\n : dataInd;".format(
IndexType
)
)
code.append(" const " + IndexType + " idx_pref_T0 = indices[next_T0];")
code.append(
" if (idx_pref_T0 < 0 || idx_pref_T0 >= data_size) {\n"
+ " return false;\n"
+ " }"
)
code.append(
" const {}* ip_next_T0 = "
"&input[idx_pref_T0 * fused_block_size];".format(InType)
)
for i in range(0, uf):
j = 8 * i
cachelinesize = 64
byteoffset = sizeof[InType] * j
prefetch = (byteoffset % cachelinesize) == 0
code.extend(compute(j, InType, use_weights, isa, prefetch))
code.append(" }")
if use_offsets:
code.append(" if (!normalize_by_lengths || length == 0) {")
else:
code.append(" if (!normalize_by_lengths || lengths[rangeIndex] == 0) {")
for i in range(0, uf):
j = 8 * i
code.append(" _mm256_storeu_ps(&op[" + str(j) + "], vop" + str(j) + ");")
code.append(" } else {")
# inv of length
if use_offsets:
code.append(" __m256 vlen_inv = _mm256_set1_ps(1.0f / length);")
else:
code.append(" __m256 vlen_inv = _mm256_set1_ps(1.0f / lengths[rangeIndex]);")
for i in range(0, uf):
j = 8 * i
code.append(
" _mm256_storeu_ps(&op["
+ str(j)
+ "], _mm256_mul_ps("
+ "vop"
+ str(j)
+ ", vlen_inv));"
)
code.append(" }")
code.append(" }")
return code
def generic(IndexType, InType, OutType, use_weights, isa, fused, use_offsets):
def compute(InType, use_weights, isa):
code = []
if InType == "float":
code.append(
" _mm256_storeu_ps(\n"
" &op[j],\n"
" _mm256_fmadd_ps(\n"
" vwgt, _mm256_loadu_ps(&ip[j]), _mm256_loadu_ps(&op[j])));" # noqa
)
elif InType == "at::Half":
code.append(
" _mm256_storeu_ps(\n"
" &op[j],\n"
" _mm256_fmadd_ps(\n"
" vwgt,\n"
" _mm256_cvtph_ps(_mm_loadu_si128(\n"
" reinterpret_cast<const __m128i*>(&ip[j]))),\n"
" _mm256_loadu_ps(&op[j])));"
)
elif InType == "uint8_t":
code.append(
" _mm256_storeu_ps(\n"
" &op[j],\n"
" _mm256_fmadd_ps(\n"
" vwgt,\n"
" _mm256_cvtepi32_ps(_mm256_cvtepu8_epi32(_mm_loadl_epi64(\n" # noqa
" reinterpret_cast<const __m128i*>(&ip[j])))),\n"
" _mm256_add_ps(_mm256_loadu_ps(&op[j]), vbio)));"
)
else:
assert False
code.append(
" _mm_prefetch(\n"
" reinterpret_cast<const char*>(&ip_next_T0[j]), _MM_HINT_T0);"
)
return code
code = []
if InType == "at::Half":
code.append(" alignas(64) at::Half vtmp1[8] = {0};")
if use_offsets:
code.append(
" for ("
+ IndexType
+ " rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {"
)
else:
code.append(
" for ("
+ IndexType
+ " rangeIndex = 0; rangeIndex < output_size; ++rangeIndex) {"
)
code.append(" " + OutType + "* op = &out[rangeIndex * block_size];")
# initialize to 0
code.append(" int64_t j = 0;")
code.append(" for (; j + 8 <= block_size; j += 8) {")
code.append(" _mm256_storeu_ps(op + j, _mm256_setzero_ps());")
code.append(" }")
code.append(" for (; j < block_size; j++) {")
code.append(" op[j] = 0.0f;")
code.append(" }")
# inner loop
if use_offsets:
code.append(
" if (dataInd != offsets[rangeIndex] - offsets[0]) {\n"
+ " return false;\n"
+ " }"
)
code.append("""\
int64_t end_offset = offsets[rangeIndex + 1];
int64_t length = end_offset - offsets[rangeIndex];""")
code.append(
" for ("
+ "int64_t"
+ " start = dataInd; dataInd < end_offset - offsets[0];\n ++dataInd) {" # noqa
)
else:
code.append(
" if (dataInd + lengths[rangeIndex] > index_size) {\n"
+ " return false;\n"
+ " }"
)
code.append(
" for ("
+ IndexType
+ " start = dataInd; dataInd < start + lengths[rangeIndex];\n ++dataInd) {" # noqa
)
code.append(" const " + IndexType + " idx = indices[dataInd];")
code.append(
" if (idx < 0 || idx >= data_size) {\n"
+ " return false;\n"
+ " }"
)
if InType == "uint8_t":
code.append(" " + OutType + " wgt = 1.f;")
code.append(" " + OutType + " bio;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];" # noqa
)
code.append(" }")
if fused:
code.append(
" const float* scale_bias = reinterpret_cast<const float*>(\n"
" &input[idx * fused_block_size + block_size]);"
)
code.append(" bio = wgt * scale_bias[1];")
code.append(" wgt = wgt * scale_bias[0];")
else:
code.append(" bio = wgt * scale_bias[2 * idx + 1];")
code.append(" wgt = wgt * scale_bias[2 * idx];")
code.append(" __m256 vbio = _mm256_set1_ps(bio);")
else:
code.append(" " + OutType + " wgt = 1.f;")
code.append(" if (weights) {")
code.append(
" wgt = weights[IS_WEIGHT_POSITIONAL ? (dataInd - start) : dataInd];" # noqa
)
code.append(" }")
code.append(" __m256 vwgt = _mm256_set1_ps(wgt);")
code.append(" const {}* ip = &input[idx * fused_block_size];".format(InType))
code.append(
" const {} next_T0 = (dataInd < index_size - prefdist_T0)\n"
" ? (dataInd + prefdist_T0)\n : dataInd;".format(
IndexType
)
)
code.append(" const " + IndexType + " idx_pref_T0 = indices[next_T0];")
code.append(
" if (idx_pref_T0 < 0 || idx_pref_T0 >= data_size) {\n"
+ " return false;\n"
+ " }"
)
code.append(
" const {}* ip_next_T0 = "
"&input[idx_pref_T0 * fused_block_size];".format(InType)
)
# compute and store main loop
code.append(" j = 0;")
code.append(" for (; j + 8 <= block_size; j += 8) {")
code.extend(compute(InType, use_weights, isa))
code.append(" }")
# leftover
code.append(" for (; j < block_size; j++) {")
if InType == "float":
code.append(" op[j] = std::fma(wgt, ip[j], op[j]);")
elif InType == "at::Half":
code.append(" vtmp1[0] = ip[j];")
code.append(
" __m256 vtmp2 =\n"
" _mm256_cvtph_ps(*(reinterpret_cast<const __m128i*>(vtmp1)));"
)
code.append(" op[j] = std::fma(wgt, ((float*)(&vtmp2))[0], op[j]);")
elif InType == "uint8_t":
code.append(" op[j] = std::fma(wgt, (float)ip[j], bio + op[j]);")
else:
assert False
code.append(" }")
code.append(" }")
if use_offsets:
code.append(" if (normalize_by_lengths && length) {")
code.append(" float len_inv = 1.0f / length;")
else:
code.append(" if (normalize_by_lengths && lengths[rangeIndex]) {")
code.append(" float len_inv = 1.0f / lengths[rangeIndex];")
code.append(" __m256 vlen_inv = _mm256_set1_ps(len_inv);")
code.append(" j = 0;")
code.append(" for (; j + 8 <= block_size; j += 8) {")
code.append(
" _mm256_storeu_ps(\n"
" &op[j], _mm256_mul_ps(_mm256_loadu_ps(&op[j]), vlen_inv));"
)
code.append(" }")
code.append(" for (; j < block_size; j++) {")
code.append(" op[j] = len_inv * op[j];")
code.append(" }")
code.append(" }")
code.append(" }")
return code
# start main code
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--filename", help="file name")
parser.add_argument("--fused", action="store_true")
parser.add_argument("--use-offsets", action="store_true")
opts = parser.parse_args()
if opts.filename:
filename = opts.filename
elif opts.fused:
if opts.use_offsets:
filename = "embedding_lookup_fused_8bit_rowwise_idx_avx2.cc"
else:
filename = "embedding_lookup_fused_8bit_rowwise_avx2.cc"
else:
if opts.use_offsets:
filename = "embedding_lookup_idx_avx2.cc"
else:
filename = "embedding_lookup_avx2.cc"
options = [
["int32_t", "int", "float", "float", "float", "float"],
["int64_t", "int64_t", "float", "float", "float", "float"],
["int32_t", "int", "half", "at::Half", "float", "float"],
["int64_t", "int64_t", "half", "at::Half", "float", "float"],
["int32_t", "int", "uint8_t", "uint8_t", "float", "float"],
["int64_t", "int64_t", "uint8_t", "uint8_t", "float", "float"],
]
code = []
# includes
code.append("//// --------------------------")
code.append("//// ATTENTION:")
code.append("//// THIS CODE IS AUTOGENERATED")
code.append("//// BY {}".format(sys.argv[0]))
code.append("//// DO NOT MODIFY!!!")
code.append("//// --------------------------\n")
code.append("#include <c10/util/Half.h>")
code.append("#include <immintrin.h>")
code.append("namespace caffe2 {\n")
for o in options:
[IndexTypeName, IndexType, InTypeName, InType, OutTypeName, OutType] = o
prefix = "Fused8BitRowwise" if opts.fused else ""
code.append("template <bool IS_WEIGHT_POSITIONAL>")
if opts.use_offsets:
fn_base = "{}EmbeddingLookupIdx_{}_{}_{}".format(
prefix, IndexTypeName, InTypeName, OutTypeName
)
else:
fn_base = "{}EmbeddingLookup_{}_{}_{}".format(
prefix, IndexTypeName, InTypeName, OutTypeName
)
suffix = "__avx2_fma"
fn = "static bool " + fn_base + suffix
code.append(fn + "(")
args = []
args.append(" const int64_t block_size,")
args.append(" const int64_t output_size,")
args.append(" const int64_t index_size,")
args.append(" const int64_t data_size,")
args.append(" const " + InType + "* input,")
args.append(" const " + IndexType + "* indices,")
if opts.use_offsets:
args.append(" const " + IndexType + "* offsets,")
else:
args.append(" const int* lengths,")
args.append(" const float* weights,")
if not opts.fused:
args.append(" const float* scale_bias,")
args.append(" bool normalize_by_lengths,")
args.append(" " + OutType + "* out) {")
code += args
code.append(" const " + IndexType + " prefdist_T0 = 16;")
# block_size is the number of elements and fused_block_size is the size of
# an entire row, including scale and bias.
offset = (8 // sizeof[InType]) if opts.fused else 0
code.append(
" const {} fused_block_size = block_size + {};".format(IndexType, offset)
)
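    # For example, with opts.fused and InType == "float" the 8 bytes of per-row
    # scale and bias correspond to 2 extra elements, so fused_block_size is
    # block_size + 2; for "uint8_t" it is block_size + 8.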
if opts.use_offsets:
code.append(" int64_t dataInd = 0;")
else:
code.append(" " + IndexType + " dataInd = 0;")
# code.append("printf(\"calling " + fn + "\\n\");");
code.append(" if (block_size == 128) {")
code += unroll(16, IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" } else if (block_size == 64) {")
code += unroll(8, IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" } else if (block_size == 32) {")
code += unroll(4, IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" } else if (block_size == 16) {")
code += unroll(2, IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" } else {")
code.append(" // generic code")
code += generic(IndexType, InType, OutType, True, "AVX2", opts.fused, opts.use_offsets)
code.append(" }")
code.append(" return dataInd == index_size;")
code.append("}")
for is_weight_positional in ["false", "true"]:
code.append("bool " + fn_base + "_" + is_weight_positional + suffix + "(")
code += args
        # Keep the generated call within the 80-character lint limit.
extra_space = "\n "
ret_string = " return " + fn_base + suffix + "<" + is_weight_positional + ">("
if len(ret_string) <= 80:
code.append(ret_string)
else:
code.append(" return " + fn_base + suffix + "<" + extra_space + is_weight_positional + ">(")
code.append(" block_size,")
code.append(" output_size,")
code.append(" index_size,")
code.append(" data_size,")
code.append(" input,")
code.append(" indices,")
if opts.use_offsets:
code.append(" offsets,")
else:
code.append(" lengths,")
code.append(" weights,")
if not opts.fused:
code.append(" scale_bias,")
code.append(" normalize_by_lengths,")
code.append(" out);")
code.append("}")
code.append("")
code.append("} // namespace caffe2")
with open(filename, "w") as fout:
for c in code:
# print(c, file = fout)
fout.write(c + "\n")
print("Created " + filename)
|
pytorch-master
|
caffe2/perfkernels/hp_emblookup_codegen.py
|
pytorch-master
|
caffe2/experiments/__init__.py
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFunHash(hu.HypothesisTestCase):
@given(n_out=st.integers(min_value=5, max_value=20),
n_in=st.integers(min_value=10, max_value=20),
n_data=st.integers(min_value=2, max_value=8),
n_weight=st.integers(min_value=8, max_value=15),
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
weight = np.random.rand(n_weight).astype(np.float32)
alpha = np.random.rand(n_alpha).astype(np.float32)
val = val.astype(np.float32)
key = key.astype(np.int64)
seg = seg.astype(np.int32)
op = core.CreateOperator(
'SparseFunHash',
['val', 'key', 'seg', 'weight', 'alpha'],
['out'],
num_outputs=n_out)
# Gradient check wrt weight
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 3, [0])
# Gradient check wrt alpha
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 4, [0])
op2 = core.CreateOperator(
'SparseFunHash',
['val', 'key', 'seg', 'weight'],
['out'],
num_outputs=n_out)
# Gradient check wrt weight
self.assertGradientChecks(
gc, op2, [val, key, seg, weight], 3, [0])
|
pytorch-master
|
caffe2/experiments/python/sparse_funhash_op_test.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
class TestTTPad(hu.HypothesisTestCase):
@given(K=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=10, max_value=20),
N=st.integers(min_value=10, max_value=20),
**hu.gcs)
def test_tt_pad(self, K, M, N, gc, dc):
op = core.CreateOperator(
'TTPad',
['A'],
['A', 'dim0'],
scale=(K))
A = np.random.rand(M, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.RunOperatorOnce(op)
def tt_pad_ref(A_):
M_ = A_.shape[0]
if M_ % K == 0:
new_dim0 = M_
else:
new_dim0 = (M_ // K + 1) * K
return (np.vstack((A_, np.zeros((new_dim0 - M_, A_.shape[1])))),
np.array([A.shape[0]]))
# Check against numpy reference
self.assertReferenceChecks(gc, op, [A], tt_pad_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [A], [0])
# Gradient check wrt A
self.assertGradientChecks(gc, op, [A], 0, [0])
|
pytorch-master
|
caffe2/experiments/python/tt_pad_op_test.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFunHash(hu.HypothesisTestCase):
@given(n_out=st.integers(min_value=5, max_value=20),
n_in=st.integers(min_value=10, max_value=20),
n_data=st.integers(min_value=2, max_value=8),
n_weight=st.integers(min_value=8, max_value=15),
n_alpha=st.integers(min_value=3, max_value=8),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs)
def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity,
gc, dc):
A = np.random.rand(n_data, n_in)
A[A > sparsity] = 0
A_coo = coo_matrix(A)
val, key, seg = A_coo.data, A_coo.col, A_coo.row
weight = np.random.rand(n_weight).astype(np.float32)
alpha = np.random.rand(n_alpha).astype(np.float32)
val = val.astype(np.float32)
key = key.astype(np.int64)
seg = seg.astype(np.int32)
op = core.CreateOperator(
'FunHash',
['val', 'key', 'seg', 'weight', 'alpha'],
['out'],
num_outputs=n_out)
# Check over multiple devices
self.assertDeviceChecks(
dc, op, [val, key, seg, weight, alpha], [0])
# Gradient check wrt weight
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 3, [0])
# Gradient check wrt alpha
self.assertGradientChecks(
gc, op, [val, key, seg, weight, alpha], 4, [0])
op2 = core.CreateOperator(
'FunHash',
['val', 'key', 'seg', 'weight'],
['out'],
num_outputs=n_out)
# Check over multiple devices
self.assertDeviceChecks(
dc, op2, [val, key, seg, weight], [0])
# Gradient check wrt weight
self.assertGradientChecks(
gc, op2, [val, key, seg, weight], 3, [0])
|
pytorch-master
|
caffe2/experiments/python/funhash_op_test.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import numpy as np
from scipy.sparse import coo_matrix
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
def test_reshape(old_shape, new_shape, stride_only=False):
blob_in0 = 'col'
blob_out0 = 'col_out'
blob_in1 = 'row'
blob_out1 = 'row_out'
old_shape_for_op = (-1, old_shape[1]) if stride_only else old_shape
op = core.CreateOperator('SparseMatrixReshape',
[blob_in0, blob_in1],
[blob_out0, blob_out1],
old_shape=old_shape_for_op,
new_shape=new_shape)
A = np.random.random_sample(old_shape)
A[np.random.random_sample(old_shape) > .5] = 0
A_coo = coo_matrix(A)
old_row, old_col = A_coo.row, A_coo.col
workspace.FeedBlob(blob_in0, old_col.astype(np.int64))
workspace.FeedBlob(blob_in1, old_row.astype(np.int32))
workspace.RunOperatorOnce(op)
A_new_coo = coo_matrix(A.reshape(new_shape))
new_row, new_col = A_new_coo.row, A_new_coo.col
col_out = workspace.FetchBlob(blob_out0)
row_out = workspace.FetchBlob(blob_out1)
np.testing.assert_array_equal(col_out, new_col)
np.testing.assert_array_equal(row_out, new_row)
class TestSparseMatrixReshapeOp(TestCase):
def test_basic_reshape(self):
test_reshape(old_shape=(3, 4), new_shape=(4, 3))
def test_missing_dim(self):
test_reshape(old_shape=(2, 8), new_shape=(-1, 4))
def test_stride_only(self):
test_reshape(old_shape=(2, 8), new_shape=(-1, 4), stride_only=True)
def test_sparse_reshape_mm(self):
M, N, K = 300, 400, 500
A = np.random.rand(M, K).astype(np.float32)
A_sparse = A * (np.random.rand(*A.shape) > .5)
A_sparse = A_sparse.reshape((K, M))
A_coo = coo_matrix(A_sparse)
idx0, idx1, a = A_coo.row, A_coo.col, A_coo.data
B = np.random.rand(K, N).astype(np.float32)
workspace.FeedBlob('col', idx1.astype(np.int64))
workspace.FeedBlob('row', idx0.astype(np.int32))
workspace.FeedBlob('B', B)
workspace.FeedBlob('a', a)
reshape_op = core.CreateOperator(
'SparseMatrixReshape',
['col', 'row'],
['new_col', 'new_row'],
old_shape=(K, M),
new_shape=(M, K))
mm_op = core.CreateOperator(
'SparseUnsortedSegmentWeightedSum',
['B', 'a', 'new_col', 'new_row'],
['Y'])
workspace.RunOperatorOnce(reshape_op)
workspace.RunOperatorOnce(mm_op)
Y = workspace.FetchBlob('Y')
np.testing.assert_allclose(A_sparse.reshape(M, K).dot(B), Y,
rtol=1e-4)
|
pytorch-master
|
caffe2/experiments/python/sparse_reshape_op_test.py
|
pytorch-master
|
caffe2/experiments/python/__init__.py
|
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package convnet_benchmarks
# Module caffe2.experiments.python.convnet_benchmarks
"""
Benchmark for common convnets.
(NOTE: the numbers below predate the parameter-update step; TODO: update them)
Speed on Titan X, with 10 warmup steps and 10 main steps and with different
versions of cudnn, is as follows (time reported below is per-batch time,
forward / forward+backward):
CuDNN V3 CuDNN v4
AlexNet 32.5 / 108.0 27.4 / 90.1
OverFeat 113.0 / 342.3 91.7 / 276.5
Inception 134.5 / 485.8 125.7 / 450.6
VGG (batch 64) 200.8 / 650.0 164.1 / 551.7
Speed on Inception with varied batch sizes and CuDNN v4 is as follows:
Batch Size Speed per batch Speed per image
16 22.8 / 72.7 1.43 / 4.54
32 38.0 / 127.5 1.19 / 3.98
64 67.2 / 233.6 1.05 / 3.65
128 125.7 / 450.6 0.98 / 3.52
Speed on Tesla M40, with 10 warmup steps and 10 main steps and with cudnn
v4, is as follows:
AlexNet 68.4 / 218.1
OverFeat 210.5 / 630.3
Inception 300.2 / 1122.2
VGG (batch 64) 405.8 / 1327.7
(Note that these numbers involve a "full" backprop, i.e. the gradient
with respect to the input image is also computed.)
To get the numbers, simply run:
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL --forward_only True
done
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL
done
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA
for BS in 16 32 64 128; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception
done
Note that VGG needs to be run at batch 64 due to memory limit on the backward
pass.
"""
import argparse
import time
from caffe2.python import cnn, workspace, core
import caffe2.python.SparseTransformer as SparseTransformer # type: ignore[import]
def MLP(order):
model = cnn.CNNModelHelper()
d = 256
depth = 20
width = 3
for i in range(depth):
for j in range(width):
current = "fc_{}_{}".format(i, j) if i > 0 else "data"
next_ = "fc_{}_{}".format(i + 1, j)
model.FC(
current, next_,
dim_in=d, dim_out=d,
weight_init=model.XavierInit,
bias_init=model.XavierInit)
model.Sum(["fc_{}_{}".format(depth, j)
for j in range(width)], ["sum"])
model.FC("sum", "last",
dim_in=d, dim_out=1000,
weight_init=model.XavierInit,
bias_init=model.XavierInit)
xent = model.LabelCrossEntropy(["last", "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, d
def AlexNet(order):
model = cnn.CNNModelHelper(order, name="alexnet",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
11,
('XavierFill', {}),
('ConstantFill', {}),
stride=4,
pad=2
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2)
conv2 = model.Conv(
pool1,
"conv2",
64,
192,
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
192,
384,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
384,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
conv5 = model.Conv(
relu4,
"conv5",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
pool5 = model.MaxPool(relu5, "pool5", kernel=3, stride=2)
fc6 = model.FC(
pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = model.Relu(fc6, "fc6")
fc7 = model.FC(
relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = model.Relu(fc7, "fc7")
fc8 = model.FC(
relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, 224
def OverFeat(order):
model = cnn.CNNModelHelper(order, name="overfeat",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
3,
96,
11,
('XavierFill', {}),
('ConstantFill', {}),
stride=4
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
conv2 = model.Conv(
pool1, "conv2", 96, 256, 5, ('XavierFill', {}), ('ConstantFill', {})
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
256,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
512,
1024,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
conv5 = model.Conv(
relu4,
"conv5",
1024,
1024,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
pool5 = model.MaxPool(relu5, "pool5", kernel=2, stride=2)
fc6 = model.FC(
pool5, "fc6", 1024 * 6 * 6, 3072, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = model.Relu(fc6, "fc6")
fc7 = model.FC(
relu6, "fc7", 3072, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = model.Relu(fc7, "fc7")
fc8 = model.FC(
relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, 231
def VGGA(order):
model = cnn.CNNModelHelper(order, name='vgg-a',
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
conv2 = model.Conv(
pool1,
"conv2",
64,
128,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
128,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
pool4 = model.MaxPool(relu4, "pool4", kernel=2, stride=2)
conv5 = model.Conv(
pool4,
"conv5",
256,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
conv6 = model.Conv(
relu5,
"conv6",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu6 = model.Relu(conv6, "conv6")
pool6 = model.MaxPool(relu6, "pool6", kernel=2, stride=2)
conv7 = model.Conv(
pool6,
"conv7",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu7 = model.Relu(conv7, "conv7")
conv8 = model.Conv(
relu7,
"conv8",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu8 = model.Relu(conv8, "conv8")
pool8 = model.MaxPool(relu8, "pool8", kernel=2, stride=2)
fcix = model.FC(
pool8, "fcix", 512 * 7 * 7, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
reluix = model.Relu(fcix, "fcix")
fcx = model.FC(
reluix, "fcx", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relux = model.Relu(fcx, "fcx")
fcxi = model.FC(
relux, "fcxi", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fcxi, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, 231
def net_DAG_Builder(model):
print("====================================================")
print(" Start Building DAG ")
print("====================================================")
net_root = SparseTransformer.netbuilder(model)
return net_root
def _InceptionModule(
model, input_blob, input_depth, output_name, conv1_depth, conv3_depths,
conv5_depths, pool_depth
):
# path 1: 1x1 conv
conv1 = model.Conv(
input_blob, output_name + ":conv1", input_depth, conv1_depth, 1,
('XavierFill', {}), ('ConstantFill', {})
)
conv1 = model.Relu(conv1, conv1)
# path 2: 1x1 conv + 3x3 conv
conv3_reduce = model.Conv(
input_blob, output_name +
":conv3_reduce", input_depth, conv3_depths[0],
1, ('XavierFill', {}), ('ConstantFill', {})
)
conv3_reduce = model.Relu(conv3_reduce, conv3_reduce)
conv3 = model.Conv(
conv3_reduce,
output_name + ":conv3",
conv3_depths[0],
conv3_depths[1],
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
conv3 = model.Relu(conv3, conv3)
# path 3: 1x1 conv + 5x5 conv
conv5_reduce = model.Conv(
input_blob, output_name +
":conv5_reduce", input_depth, conv5_depths[0],
1, ('XavierFill', {}), ('ConstantFill', {})
)
conv5_reduce = model.Relu(conv5_reduce, conv5_reduce)
conv5 = model.Conv(
conv5_reduce,
output_name + ":conv5",
conv5_depths[0],
conv5_depths[1],
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
conv5 = model.Relu(conv5, conv5)
# path 4: pool + 1x1 conv
pool = model.MaxPool(
input_blob,
output_name + ":pool",
kernel=3,
stride=1,
pad=1
)
pool_proj = model.Conv(
pool, output_name + ":pool_proj", input_depth, pool_depth, 1,
('XavierFill', {}), ('ConstantFill', {})
)
pool_proj = model.Relu(pool_proj, pool_proj)
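    # Concatenate the four paths along the channel axis; the output depth is
    # conv1_depth + conv3_depths[1] + conv5_depths[1] + pool_depth.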
output = model.Concat([conv1, conv3, conv5, pool_proj], output_name)
return output
def Inception(order):
model = cnn.CNNModelHelper(order, name="inception",
use_cudnn=True, cudnn_exhaustive_search=True)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
7,
('XavierFill', {}),
('ConstantFill', {}),
stride=2,
pad=3
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2, pad=1)
conv2a = model.Conv(
pool1, "conv2a", 64, 64, 1, ('XavierFill', {}), ('ConstantFill', {})
)
conv2a = model.Relu(conv2a, conv2a)
conv2 = model.Conv(
conv2a,
"conv2",
64,
192,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2, pad=1)
# Inception modules
inc3 = _InceptionModule(
model, pool2, 192, "inc3", 64, [96, 128], [16, 32], 32
)
inc4 = _InceptionModule(
model, inc3, 256, "inc4", 128, [128, 192], [32, 96], 64
)
pool5 = model.MaxPool(inc4, "pool5", kernel=3, stride=2, pad=1)
inc5 = _InceptionModule(
model, pool5, 480, "inc5", 192, [96, 208], [16, 48], 64
)
inc6 = _InceptionModule(
model, inc5, 512, "inc6", 160, [112, 224], [24, 64], 64
)
inc7 = _InceptionModule(
model, inc6, 512, "inc7", 128, [128, 256], [24, 64], 64
)
inc8 = _InceptionModule(
model, inc7, 512, "inc8", 112, [144, 288], [32, 64], 64
)
inc9 = _InceptionModule(
model, inc8, 528, "inc9", 256, [160, 320], [32, 128], 128
)
pool9 = model.MaxPool(inc9, "pool9", kernel=3, stride=2, pad=1)
inc10 = _InceptionModule(
model, pool9, 832, "inc10", 256, [160, 320], [32, 128], 128
)
inc11 = _InceptionModule(
model, inc10, 832, "inc11", 384, [192, 384], [48, 128], 128
)
pool11 = model.AveragePool(inc11, "pool11", kernel=7, stride=1)
fc = model.FC(
pool11, "fc", 1024, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
# It seems that Soumith's benchmark does not have softmax on top
# for Inception. We will add it anyway so we can have a proper
# backward pass.
pred = model.Softmax(fc, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, 224
def AddInput(model, batch_size, db, db_type):
"""Adds the data input part."""
data_uint8, label = model.TensorProtosDBInput(
[], ["data_uint8", "label"], batch_size=batch_size,
db=db, db_type=db_type
)
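    # Cast raw uint8 pixels to float, convert NHWC -> NCHW, scale to [0, 1),
    # and stop gradients from flowing back into the data blob.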
data = model.Cast(data_uint8, "data_nhwc", to=core.DataType.FLOAT)
data = model.NHWC2NCHW(data, "data")
data = model.Scale(data, data, scale=float(1. / 256))
data = model.StopGradient(data, data)
return data, label
def AddParameterUpdate(model):
""" Simple plain SGD update -- not tuned to actually train the models """
ITER = model.Iter("iter")
LR = model.LearningRate(
ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999)
ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
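    # The WeightedSum below computes param <- 1.0 * param + LR * param_grad;
    # LR is negative (base_lr=-1e-8), so this is a (tiny) gradient-descent step.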
for param in model.params:
param_grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, param_grad, LR], param)
def Benchmark(model_gen, arg):
model, input_size = model_gen(arg.order)
model.Proto().type = arg.net_type
model.Proto().num_workers = arg.num_workers
# In order to be able to run everything without feeding more stuff, let's
# add the data and label blobs to the parameter initialization net as well.
if arg.order == "NCHW":
input_shape = [arg.batch_size, 3, input_size, input_size]
else:
input_shape = [arg.batch_size, input_size, input_size, 3]
if arg.model == "MLP":
input_shape = [arg.batch_size, input_size]
model.param_init_net.GaussianFill(
[],
"data",
shape=input_shape,
mean=0.0,
std=1.0
)
model.param_init_net.UniformIntFill(
[],
"label",
shape=[arg.batch_size, ],
min=0,
max=999
)
if arg.forward_only:
print('{}: running forward only.'.format(arg.model))
else:
print('{}: running forward-backward.'.format(arg.model))
model.AddGradientOperators(["loss"])
AddParameterUpdate(model)
if arg.order == 'NHWC':
print(
'==WARNING==\n'
'NHWC order with CuDNN may not be supported yet, so I might\n'
'exit suddenly.'
)
if not arg.cpu:
model.param_init_net.RunAllOnGPU()
model.net.RunAllOnGPU()
if arg.dump_model:
# Writes out the pbtxt for benchmarks on e.g. Android
with open(
"{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
) as fid:
fid.write(str(model.param_init_net.Proto()))
with open("{0}.pbtxt".format(arg.model), "w") as fid:
fid.write(str(model.net.Proto()))
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
for i in range(arg.warmup_iterations):
workspace.RunNet(model.net.Proto().name)
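    # Time arg.iterations runs of the full net inside a single plan and report
    # the average wall-clock time per iteration.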
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("run", model.net, arg.iterations))
start = time.time()
workspace.RunPlan(plan)
print('Spent: {}'.format((time.time() - start) / arg.iterations))
if arg.layer_wise_benchmark:
print('Layer-wise benchmark.')
workspace.BenchmarkNet(model.net.Proto().name, 1, arg.iterations, True)
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size."
)
parser.add_argument("--model", type=str, help="The model to benchmark.")
parser.add_argument(
"--order",
type=str,
default="NCHW",
help="The order to evaluate."
)
parser.add_argument(
"--cudnn_ws",
type=int,
default=-1,
help="The cudnn workspace size."
)
parser.add_argument(
"--iterations",
type=int,
default=10,
help="Number of iterations to run the network."
)
parser.add_argument(
"--warmup_iterations",
type=int,
default=10,
help="Number of warm-up iterations before benchmarking."
)
parser.add_argument(
"--forward_only",
action='store_true',
help="If set, only run the forward pass."
)
parser.add_argument(
"--layer_wise_benchmark",
action='store_true',
help="If True, run the layer-wise benchmark as well."
)
parser.add_argument(
"--cpu",
action='store_true',
help="If True, run testing on CPU instead of GPU."
)
parser.add_argument(
"--dump_model",
action='store_true',
help="If True, dump the model prototxts to disk."
)
parser.add_argument("--net_type", type=str, default="dag")
parser.add_argument("--num_workers", type=int, default=2)
return parser
if __name__ == '__main__':
args = GetArgumentParser().parse_args()
if (
not args.batch_size or not args.model or not args.order or
not args.cudnn_ws
):
GetArgumentParser().print_help()
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
model_map = {
'AlexNet': AlexNet,
'OverFeat': OverFeat,
'VGGA': VGGA,
'Inception': Inception,
'MLP': MLP,
}
Benchmark(model_map[args.model], args)
|
pytorch-master
|
caffe2/experiments/python/convnet_benchmarks.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package device_reduce_sum_bench
# Module caffe2.experiments.python.device_reduce_sum_bench
import argparse
import itertools
import logging
import os
from six import add_metaclass
import numpy as np
from caffe2.python import workspace, core
from caffe2.python.hypothesis_test_util import runOpBenchmark, gpu_do
logging.basicConfig()
logger = logging.getLogger(os.path.basename(__file__))
logger.setLevel(logging.INFO)
ALL_BENCHMARKS = {}
class BenchmarkMeta(type):
def __new__(metacls, name, bases, class_dict):
cls = type.__new__(metacls, name, bases, class_dict)
if name != 'Benchmark':
ALL_BENCHMARKS[name] = cls
return cls
@add_metaclass(BenchmarkMeta)
class Benchmark(object):
def __init__(self):
self.results = []
def display(self):
print('Results ({}):'.format(type(self).__name__))
print('input size ms/iter')
print('------------------------------ -----------')
for size, ms in self.results:
print('{!s:<30} {:.4f}'.format(size, ms))
class SumElements(Benchmark):
def run(self):
op = core.CreateOperator(
"SumElements",
["X"],
["y"]
)
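        # Sweep n over powers of ten: 1, 10, 100, ..., 1e9 elements.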
        for n in map(pow, itertools.cycle([10]), range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SumSqrElements(Benchmark):
def run(self):
op = core.CreateOperator(
"SumSqrElements",
["X"],
["y"]
)
        for n in map(pow, itertools.cycle([10]), range(10)):
X = np.random.rand(n).astype(np.float32)
logger.info('Running benchmark for n = {}'.format(n))
ret = runOpBenchmark(gpu_do, op, inputs=[X])
self.results.append((n, ret[1]))
class SoftMaxWithLoss(Benchmark):
def run(self):
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
)
        for n in map(pow, itertools.cycle([10]), range(8)):
            for D in map(pow, itertools.cycle([10]), range(3)):
X = np.random.rand(n, D).astype(np.float32)
label = (np.random.rand(n) * D).astype(np.int32)
logger.info('Running benchmark for n = {}, D= {}'.format(n, D))
ret = runOpBenchmark(gpu_do, op, inputs=[X, label])
self.results.append(((n, D), ret[1]))
def parse_args():
parser = argparse.ArgumentParser(os.path.basename(__file__))
parser.add_argument('-b', '--benchmarks', nargs='+',
default=ALL_BENCHMARKS.keys(),
help='benchmarks to run (default: %(default)s))')
return parser.parse_args()
def main():
args = parse_args()
benchmarks = [ALL_BENCHMARKS[name]() for name in args.benchmarks]
for bench in benchmarks:
bench.run()
for bench in benchmarks:
bench.display()
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
|
pytorch-master
|
caffe2/experiments/python/device_reduce_sum_bench.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
class TestTTContraction(hu.HypothesisTestCase):
@given(D=st.integers(min_value=5, max_value=20),
K=st.integers(min_value=5, max_value=20),
M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
**hu.gcs)
def test_tt_contraction(self, D, K, M, N, gc, dc):
A = np.random.rand(K, M).astype(np.float32)
B = np.random.rand(D, K, N).astype(np.float32)
workspace.FeedBlob('A', A)
workspace.FeedBlob('B', B)
op = core.CreateOperator(
'TTContraction',
['A', 'B'],
['C'],
K=K,
M=M,
N=N)
workspace.RunOperatorOnce(op)
def tt_contraction_ref(A_, B_):
return ((A_[:, :, np.newaxis] * B_[:, :, np.newaxis, :])
.sum(axis=1).flatten()),
# Check against numpy reference
self.assertReferenceChecks(gc, op, [A, B], tt_contraction_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [A, B], [0])
# Gradient check wrt A
self.assertGradientChecks(gc, op, [A, B], 0, [0])
# Gradient check wrt B
self.assertGradientChecks(gc, op, [A, B], 1, [0])
|
pytorch-master
|
caffe2/experiments/python/tt_contraction_op_test.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package SparseTransformer
# Module caffe2.experiments.python.SparseTransformer
from caffe2.python import workspace
import scipy.sparse
class NetDefNode():
def __init__(self, name, optype, p=None, op=None):
self.name = name
self.optype = optype
self.ops = {}
self.prev = {}
self.insertInput(p)
self.visited = False
self.op = op
def insertInput(self, p):
"""
        Insert an input for this op and maintain the output list of the previous op.
        p: a node or a list of nodes
"""
if isinstance(p, list):
for i in p:
self.prev[i.name] = i
i.ops[self.name] = self
elif isinstance(p, NetDefNode):
self.prev[p.name] = p
p.ops[self.name] = self
def deleteInput(self, p):
if isinstance(p, NetDefNode):
del self.prev[p.name]
del p.ops[self.name]
def maskNallocate(weight_name):
"""
    Combine mask and weights;
    create wcsr, iw, jw, and return their names.
"""
w = workspace.FetchBlob(weight_name)
w_csr = scipy.sparse.csr_matrix(w)
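    # CSR pieces: wcsr holds the non-zero values, iw the row pointers,
    # and jw the column indices of the pruned weight matrix.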
wcsr = w_csr.data
iw = w_csr.indptr
jw = w_csr.indices
workspace.FeedBlob(weight_name + "wcsr", wcsr)
workspace.FeedBlob(weight_name + "iw", iw)
workspace.FeedBlob(weight_name + "jw", jw)
return weight_name + "wcsr", weight_name + "iw", weight_name + "jw"
def transFCRelu(cur, id2node, name2id, ops, model):
"""
Add trans before and after this FC_Prune->(Relu)->FC_Prune chain.
"""
# 1. add trans before the start of this chain
# assuming that cur is a FC_Prune, and it has only one input
    pre = next(iter(cur.prev.values()))
# Create a node /op and insert it.
# TODO(wyiming): check whether it is correct here
current_blob = model.Transpose(cur.op.input[0], cur.op.input[0] + "_trans")
# print model.net.Proto()
trans_op = model.net.Proto().op[-1]
trans_node = NetDefNode(trans_op.output[0], "Transpose", pre, trans_op)
trans_node.visited = True
pre_new = trans_node
# 2. use while loop to visit the chain
while True:
# breakup with the parent
cur.deleteInput(pre)
if not (cur.optype == "FC_Prune" or cur.optype == "Relu"):
print("Reaching the end of the chain")
break
if len(cur.ops) > 1:
print("A FC/Relu giving more than 1 useful outputs")
if cur.optype == "FC_Prune":
op = cur.op
wcsr, iw, jw = maskNallocate(op.input[1])
bias_name = op.input[3]
# TODO(wyiming): create a new Op here
current_blob = model.FC_Sparse(current_blob,
cur.op.output[0] + "_Sparse",
wcsr, iw, jw, bias_name)
sps_op = model.net.Proto().op[-1]
sps_node = NetDefNode(cur.op.output[0] + "_Sparse",
"FC_Sparse",
pre_new, sps_op)
sps_node.visited = True
pre_new = sps_node
if cur.optype == "Relu":
op = cur.op
current_blob = model.Relu(current_blob, current_blob)
rel_op = model.net.Proto().op[-1]
rel_node = NetDefNode(str(current_blob), "Relu",
pre_new, rel_op)
rel_node.visited = True
pre_new = rel_node
cur.visited = True
pre = cur
flag = False
        for _, temp in cur.ops.items():
if temp.optype == "Relu" or temp.optype == "FC_Prune":
flag = True
cur = temp
if not flag:
# assume that there is only 1 output that is not PrintOP
            cur = next(iter(cur.ops.values()))
            cur.deleteInput(pre)
            print("No FC/Relu children")
print(cur.op.type)
break
# 3. add trans after this chain like 1.
current_blob = model.Transpose(current_blob, pre.op.output[0])
trans_op = model.net.Proto().op[-1]
trans_node = NetDefNode(str(current_blob), "Transpose", pre_new, trans_op)
trans_node.visited = True
cur.insertInput(trans_node)
print(cur.prev)
print(trans_node.ops)
def Prune2Sparse(cur, id2node, name2id, ops, model):
# Assume that FC and Relu takes in only 1 input;
# If not raise warning
if not cur.visited and cur.optype == "FC_Prune":
transFCRelu(cur, id2node, name2id, ops, model)
cur.visited = True
    for name, n in cur.ops.items():
Prune2Sparse(n, id2node, name2id, ops, model)
def net2list(net_root):
"""
    Use topological (BFS) order to collect the ops of a net into a list
"""
bfs_queue = []
op_list = []
cur = net_root
    for _, n in cur.ops.items():
bfs_queue.append(n)
while bfs_queue:
node = bfs_queue[0]
bfs_queue = bfs_queue[1:]
op_list.append(node.op)
        for _, n in node.ops.items():
bfs_queue.append(n)
return op_list
def netbuilder(model):
print("Welcome to model checker")
proto = model.net.Proto()
net_name2id = {}
net_id2node = {}
net_root = NetDefNode("net_root", "root", None)
for op_id, op in enumerate(proto.op):
if op.type == "Print":
continue
op_name = '%s/%s (op#%d)' % (op.name, op.type, op_id) \
if op.name else '%s (op#%d)' % (op.type, op_id)
# print(op_name)
op_node = NetDefNode(op_name, op.type, op=op)
net_id2node[op_id] = op_node
if_has_layer_input = False
for input_name in op.input:
if input_name not in net_name2id:
                # assume that names not seen yet are non-layers
# TODO: write a non-layer checker and log it
continue
op_node.insertInput(net_id2node[net_name2id[input_name]])
if_has_layer_input = True
if not if_has_layer_input:
op_node.insertInput(net_root)
for output_name in op.output:
net_name2id[output_name] = op_id
return net_root, net_name2id, net_id2node
|
pytorch-master
|
caffe2/experiments/python/SparseTransformer.py
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
## @package net_construct_bench
# Module caffe2.experiments.python.net_construct_bench
import argparse
import logging
import time
from caffe2.python import workspace, data_parallel_model
from caffe2.python import cnn
import caffe2.python.models.resnet as resnet
'''
Simple benchmark that creates a data-parallel resnet-50 model
and measures the time.
'''
logging.basicConfig()
log = logging.getLogger("net_construct_bench")
log.setLevel(logging.DEBUG)
def AddMomentumParameterUpdate(train_model, LR):
'''
Add the momentum-SGD update.
'''
params = train_model.GetParams()
assert(len(params) > 0)
ONE = train_model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
NEGONE = train_model.param_init_net.ConstantFill(
[], 'NEGONE', shape=[1], value=-1.0,
)
for param in params:
param_grad = train_model.param_to_grad[param]
param_momentum = train_model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
# Update param_grad and param_momentum in place
train_model.net.MomentumSGD(
[param_grad, param_momentum, LR],
[param_grad, param_momentum],
momentum=0.9,
nesterov=1
)
# Update parameters by applying the moment-adjusted gradient
train_model.WeightedSum(
[param, ONE, param_grad, NEGONE],
param
)
def Create(args):
gpus = list(range(args.num_gpus))
log.info("Running on gpus: {}".format(gpus))
    # Create CNNModelHelper object
train_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet50",
use_cudnn=True,
cudnn_exhaustive_search=False
)
# Model building functions
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
num_labels=1000,
label="label",
)
model.Accuracy([softmax, "label"], "accuracy")
return [loss]
# SGD
def add_parameter_update_ops(model):
model.AddWeightDecay(1e-4)
ITER = model.Iter("ITER")
stepsz = int(30)
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=0.1,
policy="step",
stepsize=stepsz,
gamma=0.1,
)
AddMomentumParameterUpdate(model, LR)
def add_image_input(model):
pass
start_time = time.time()
# Create parallelized model
data_parallel_model.Parallelize_GPU(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnet50_model_ops,
param_update_builder_fun=add_parameter_update_ops,
devices=gpus,
)
ct = time.time() - start_time
train_model.net._CheckLookupTables()
log.info("Model create for {} gpus took: {} secs".format(len(gpus), ct))
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: Benchmark for net construction"
)
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPUs.")
args = parser.parse_args()
Create(args)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
import cProfile
cProfile.run('main()', sort="cumulative")
|
pytorch-master
|
caffe2/experiments/python/net_construct_bench.py
|
pytorch-master
|
caffe2/contrib/__init__.py
|
|
pytorch-master
|
caffe2/contrib/nnpack/__init__.py
|
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume, settings
import numpy as np
import time
import os
from caffe2.python import core, dyndep
import caffe2.python.hypothesis_test_util as hu
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nnpack:nnpack_ops")
np.random.seed(1)
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
ws.run(plan)
after = time.time()
print("Timing network, time taken per-iteration: {:.6f}ms".format((
after - before) / float(iters) * 1000.0))
return after - before
def has_avx2():
import subprocess
try:
subprocess.check_output(["grep", "avx2", "/proc/cpuinfo"])
return True
except subprocess.CalledProcessError:
# grep exits with rc 1 on no matches
return False
@unittest.skipIf(not has_avx2(), "NNPACK requires AVX2")
class NNPackOpsTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 2),
kernel=st.integers(3, 5),
size=st.integers(5, 10),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5),
groups=st.integers(1, 2))
def test_convolution_correctness(self, stride, pad, kernel, size,
input_channels,
batch_size, groups):
input_channels *= groups
output_channels = int(input_channels / groups)
assume(input_channels % groups == 0)
assume(output_channels % groups == 0)
assume(output_channels == input_channels / groups)
assume(stride <= kernel)
if stride != 1:
assume(batch_size == 1)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(
input_channels, output_channels, kernel, kernel).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
order = "NCHW"
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"Conv",
["X", "w", "b"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
kts="TUPLE",
engine=engine,
group=groups,
)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("w").feed(w)
self.ws.create_blob("b").feed(b)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5))
def test_max_pool_correctness(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
order = "NCHW"
outputs = {}
        # only 2x2 stride with a 2x2 pooling window is supported in NNPACK for now
stride = 2
kernel = 2
# The pooling strategy of NNPack is different from caffe2 pooling
pad = 0
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"MaxPool",
["X"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5))
def test_relu_correctness(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"Relu",
["X"],
["Y"],
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@given(size=st.sampled_from([6, 8]),
input_channels=st.integers(1, 8),
batch_size=st.integers(1, 5),
alpha=st.floats(0, 1))
def test_leaky_relu_correctness(self, size, input_channels, batch_size,
alpha):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
outputs = {}
for engine in ["", "NNPACK"]:
op = core.CreateOperator(
"LeakyRelu",
["X"],
["Y"],
alpha=alpha,
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(op)
outputs[engine] = self.ws.blobs["Y"].fetch()
np.testing.assert_allclose(
outputs[""],
outputs["NNPACK"],
atol=1e-4,
rtol=1e-4)
@settings(deadline=3600)
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
@given(stride=st.integers(1, 1),
pad=st.integers(0, 2),
kernel=st.sampled_from([3, 5, 7]),
size=st.integers(30, 90),
input_channels=st.sampled_from([3, 64, 256]),
output_channels=st.sampled_from([32, 96, 256]),
batch_size=st.sampled_from([32, 64, 96, 128]))
def test_timings(self, stride, pad, kernel, size,
input_channels, output_channels, batch_size):
assume(stride <= kernel)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
w = np.random.rand(output_channels, input_channels,
kernel, kernel).astype(np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
order = "NCHW"
times = {}
for engine in ["", "NNPACK"]:
net = core.Net(engine + "_test")
net.Conv(
["X", "W", "b"], "Y",
order=order,
kernel=kernel,
stride=stride,
pad=pad,
kts="TUPLE",
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("W").feed(w)
self.ws.create_blob("b").feed(b)
self.ws.run(net)
times[engine] = benchmark(self.ws, net)
print("Speedup for NNPACK: {:.2f}".format(
times[""] / times["NNPACK"]))
@settings(deadline=3600)
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
@given(size=st.integers(30, 90),
input_channels=st.sampled_from([3, 64, 256]),
batch_size=st.sampled_from([32, 64, 96, 128]))
def test_relu_timings(self, size, input_channels, batch_size):
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
times = {}
for engine in ["", "NNPACK"]:
net = core.Net(engine + "_test")
net.Relu(
["X"],
["Y"],
engine=engine,
)
self.ws.create_blob("X").feed(X)
self.ws.run(net)
times[engine] = benchmark(self.ws, net)
print("Speedup for NNPACK: {:.2f}".format(
times[""] / times["NNPACK"]))
|
pytorch-master
|
caffe2/contrib/nnpack/nnpack_ops_test.py
|
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, dyndep, test_util
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/warpctc:ctc_ops')
workspace.GlobalInit(["python"])
def softmax(w):
maxes = np.amax(w, axis=-1, keepdims=True)
e = np.exp(w - maxes)
dist = e / np.sum(e, axis=-1, keepdims=True)
return dist
class CTCOpsTest(test_util.TestCase):
def verify_cost(self, device_option, is_test, skip_input_lengths=False):
alphabet_size = 5
N = 1
T = 2
inputs = np.asarray(
[
[[0.1, 0.6, 0.1, 0.1, 0.1]],
[[0.1, 0.1, 0.6, 0.1, 0.1]],
]
).reshape(T, N, alphabet_size).astype(np.float32)
labels = np.asarray([1, 2]).astype(np.int32).reshape(T)
label_lengths = np.asarray([2]).astype(np.int32).reshape(N)
input_lengths = np.asarray([T]).astype(np.int32)
net = core.Net("test-net")
input_blobs = ["inputs", "labels", "label_lengths"]
if not skip_input_lengths:
input_blobs.append("input_lengths")
output_blobs = ["costs", "workspace"] if is_test \
else ["inputs_grad_to_be_copied", "costs", "workspace"]
net.CTC(input_blobs,
output_blobs,
is_test=is_test,
device_option=device_option)
if not is_test:
net.AddGradientOperators(["costs"])
self.ws.create_blob("inputs").feed(inputs, device_option=device_option)
self.ws.create_blob("labels").feed(labels)
self.ws.create_blob("label_lengths").feed(label_lengths)
if not skip_input_lengths:
self.ws.create_blob("input_lengths").feed(input_lengths)
self.ws.run(net)
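        # With labels [1, 2] over T=2 frames there is no room for blanks, so the
        # CTC likelihood is simply the product of the two per-frame softmax probs.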
probs = softmax(inputs)
expected = probs[0, 0, 1] * probs[1, 0, 2]
self.assertEqual(self.ws.blobs["costs"].fetch().shape, (N,))
self.assertEqual(self.ws.blobs["costs"].fetch().dtype, np.float32)
cost = self.ws.blobs["costs"].fetch()[0]
print(cost)
self.assertAlmostEqual(np.exp(-cost), expected)
if not is_test:
# Make sure inputs_grad was added by AddGradientOperators and
# it is equal to the inputs_grad_to_be_copied blob returned by CTCop
assert np.array_equal(
self.ws.blobs["inputs_grad"].fetch(),
self.ws.blobs["inputs_grad_to_be_copied"].fetch()
)
def test_ctc_cost_cpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU),
is_test=False)
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU),
is_test=False, skip_input_lengths=True)
def test_ctc_cost_gpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
device_id=0),
is_test=False)
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
device_id=0),
is_test=False,
skip_input_lengths=True)
def test_ctc_forward_only_cpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU),
is_test=True)
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU),
is_test=True,
skip_input_lengths=True)
def test_ctc_forward_only_gpu(self):
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
device_id=0),
is_test=True)
self.verify_cost(
caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA,
device_id=0),
is_test=True,
skip_input_lengths=True)
|
pytorch-master
|
caffe2/contrib/warpctc/ctc_ops_test.py
|
pytorch-master
|
caffe2/contrib/warpctc/__init__.py
|
|
pytorch-master
|
caffe2/contrib/nccl/__init__.py
|
|
import unittest
import hypothesis.strategies as st
from hypothesis import given, assume
import numpy as np
import time
import os
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, muji, dyndep
import caffe2.python.hypothesis_test_util as hu
np.random.seed(1)
dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/nccl:nccl_ops')
def gpu_device(i):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = workspace.GpuDeviceType
device_option.device_id = i
return device_option
def benchmark(ws, net, warmups=5, iters=100):
for _ in range(warmups):
ws.run(net)
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
before = time.time()
ws.run(plan)
after = time.time()
print("Timing network, time taken per-iteration: {:.6f}ms".format((
after - before) / float(iters) * 1000.0))
return after - before
@unittest.skipIf(not workspace.has_cuda_support, "NCCL only on CUDA GPU")
class NCCLOpsTest(hu.HypothesisTestCase):
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000),
in_place=st.booleans())
def test_nccl_allreduce(self, n, m, in_place):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
op = core.CreateOperator("NCCLAllreduce", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def allreduce(*args):
assert len(args) == n
output = np.sum(args, axis=0)
return [output for _ in range(n)]
outputs = self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
allreduce, input_device_options)
for output in outputs:
np.testing.assert_array_equal(outputs[0], output)
self.assertEqual(outputs[0].tobytes(), output.tobytes())
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000),
root=st.integers(min_value=0,
max_value=workspace.NumGpuDevices() - 1))
def test_nccl_broadcast(self, n, m, root):
assume(root < n)
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLBroadcast", inputs, inputs, root=root)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def broadcast(*args):
assert len(args) == n
return [args[root] for _ in range(n)]
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
broadcast, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000),
# NCCL Reduce seems to deadlock for non-zero roots.
root=st.integers(min_value=0, max_value=0),
in_place=st.booleans())
def test_nccl_reduce(self, n, m, root, in_place):
assume(in_place is False or root == 0)
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
op = core.CreateOperator(
"NCCLReduce", inputs,
inputs[root] if in_place else b"o", root=root)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def reduce(*args):
assert len(args) == n
return [np.sum(args, axis=0)]
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
reduce, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000))
def test_nccl_allgather(self, n, m):
xs = [np.random.randn(m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
outputs = [str("o_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLAllGather", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def allgather(*args):
assert len(args) == n
return [np.stack(args, axis=0) for _ in range(n)]
outputs = self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
allgather, input_device_options)
for output in outputs:
np.testing.assert_array_equal(outputs[0], output)
self.assertEqual(outputs[0].tobytes(), output.tobytes())
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=1, max_value=1000))
def test_nccl_reduce_scatter(self, n, m):
xs = [np.random.randn(n, m).astype(np.float32) for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
outputs = [str("o_{}".format(i)) for i in range(n)]
op = core.CreateOperator("NCCLReduceScatter", inputs, outputs)
input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)}
def reduce_scatter(*args):
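            # Reference: element-wise sum of all inputs, then device i keeps
            # row i of the reduced result.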
assert len(args) == n
reduced = sum(args)
assert len(reduced.shape) > 1
ref = [reduced[i, :] for i in range(n)]
return ref
self.assertReferenceChecks(
hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)],
reduce_scatter, input_device_options)
@given(n=st.integers(min_value=2, max_value=workspace.NumGpuDevices()),
m=st.integers(min_value=100000, max_value=100000),
iters=st.integers(min_value=1, max_value=100),
net_type=st.sampled_from(["dag", "async_dag", "simple"]))
def _test_nccl_sync(self, n, m, iters, net_type):
inputs = [str("x_{}".format(i)) for i in range(n)]
extra_inputs = [str("xe_{}".format(i)) for i in range(n)]
net = core.Net("asdf")
net.Proto().type = net_type
net.Proto().num_workers = n
for i in range(n):
net.ConstantFill([], inputs[i], shape=[m], value=0.0,
device_option=gpu_device(i))
net.ConstantFill([], extra_inputs[i], shape=[m], value=1.0,
device_option=gpu_device(i))
for _ in range(iters):
net.Sum([inputs[i], extra_inputs[i]], [inputs[i]],
device_option=gpu_device(i))
net.NCCLReduce(inputs, [inputs[0]], device_option=gpu_device(0))
self.ws.run(net)
np.testing.assert_array_equal(
self.ws.blobs[inputs[0]].fetch(),
np.full(shape=(m,), fill_value=iters * n, dtype=np.float32))
@unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark")
def test_timings(self):
for n in range(2, workspace.NumGpuDevices()):
for in_place in [False, True]:
                xs = [np.random.randn(int(1e7)).astype(np.float32)
for i in range(n)]
inputs = [str("x_{}".format(i)) for i in range(n)]
prefix = "" if in_place else "o"
outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)]
net = core.Net("test")
net.NCCLAllreduce(inputs, outputs)
net.RunAllOnGPU()
for i in range(n):
self.ws.create_blob(inputs[i]).feed(xs[i], gpu_device(i))
self.ws.run(net)
net_time = benchmark(self.ws, net)
vanilla = core.Net("vanilla")
muji.Allreduce(vanilla, inputs)
vanilla_time = benchmark(self.ws, vanilla)
print("Speedup for NCCL: {:.2f}".format(
vanilla_time / net_time))
|
pytorch-master
|
caffe2/contrib/nccl/nccl_ops_test.py
|
import numpy as np
import pickle
from collections import OrderedDict
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, scope
import logging
logging.basicConfig()
log = logging.getLogger("AnyExpOnTerm")
log.setLevel(logging.DEBUG)
def initialize_params_from_file(
model, weights_file, num_xpus, opts,
broadcast_computed_param=False, reset_epoch=False):
start_epoch, lr, best_metric = initialize_master_xpu_model_params(
model, weights_file, opts, reset_epoch)
broadcast_parameters(opts, model, num_xpus, broadcast_computed_param)
return start_epoch, lr, best_metric
def initialize_master_xpu_model_params(model, weights_file, opts, reset_epoch):
log.info("Initializing model params from file: {}".format(weights_file))
    with open(weights_file, 'rb') as fopen:
blobs = pickle.load(fopen)
if 'blobs' in blobs:
blobs = blobs['blobs']
start_epoch = 0
best_metric = float('-inf')
if 'epoch' in blobs:
log.info('epoch {} is found in model file'.format(blobs['epoch']))
if not reset_epoch:
start_epoch = blobs['epoch']
else:
log.info('Reset epoch')
else:
log.info('no epoch is found in model file')
lr = opts['model_param']['base_learning_rate']
if 'lr' in blobs:
lr = blobs['lr']
if 'best_metric' in blobs and not reset_epoch:
best_metric = blobs['best_metric']
if model is not None:
log.info('initialize model parameters using weights file: {}'.format(
weights_file
))
ws_blobs = workspace.Blobs()
unscoped_blob_names = OrderedDict()
for blob in model.GetAllParams():
unscoped_blob_names[unscope_name(str(blob))] = True
root_xpu_id = opts['distributed']['first_xpu_id']
device = opts['distributed']['device']
caffe2_pb2_DEVICE =\
caffe2_pb2.CUDA if opts['distributed']['device'] == 'gpu'\
else caffe2_pb2.CPU
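        # Load weights only into the root device's name scope here;
        # broadcast_parameters later copies them to the remaining devices.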
with core.NameScope('{}_{}'.format(device, root_xpu_id)):
with core.DeviceScope(core.DeviceOption(caffe2_pb2_DEVICE, 0)):
for unscoped_blob_name in unscoped_blob_names.keys():
scoped_blob_name = scoped_name(unscoped_blob_name)
if unscoped_blob_name not in blobs:
log.info('{:s} not found'.format(unscoped_blob_name))
continue
log.info(
'{:s} loaded from weights file into: {:s}'.format(
unscoped_blob_name, scoped_blob_name
)
)
if scoped_blob_name in ws_blobs:
ws_blob = workspace.FetchBlob(scoped_blob_name)
if not ws_blob.shape == blobs[unscoped_blob_name].shape:
log.info(
('Workspace blob {} with shape {} does '
'not match weights file shape {}').format(
unscoped_blob_name, ws_blob.shape,
blobs[unscoped_blob_name].shape)
)
else:
workspace.FeedBlob(
scoped_blob_name,
blobs[unscoped_blob_name].astype(
np.float32, copy=False))
else:
log.info('Skip initializing model parameters from file: {}'.format(
weights_file
))
log.info('Complete initialize_master_xpu_model_params')
return start_epoch, lr, best_metric
def broadcast_parameters(opts, model, num_xpus, broadcast_computed_param=False):
if num_xpus == 1:
log.info("only 1 device. Skip parameter broadcast")
return
all_params = [model.GetParams()]
if broadcast_computed_param:
all_params.append(model.GetComputedParams())
caffe2_pb2_DEVICE =\
caffe2_pb2.CUDA if opts['distributed']['device'] == 'gpu'\
else caffe2_pb2.CPU
for params in all_params:
assert len(params) % num_xpus == 0, \
"Current model doesn't match device number when loading checkpoint"
params_per_xpu = int(len(params) / num_xpus)
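        # params is assumed to be laid out device-major (all of device 0 first,
        # then device 1, ...), so params[idx::params_per_xpu] selects the idx-th
        # parameter of every device; the first device's copy is fed to the rest.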
for idx in range(params_per_xpu):
blobs = [param for param in params[idx::params_per_xpu]]
data = workspace.FetchBlob(blobs[0])
log.info('Broadcasting {} to'.format(str(blobs[0])))
for i, p in enumerate(blobs[1:]):
log.info(' |-> {}'.format(str(p)))
with core.DeviceScope(core.DeviceOption(caffe2_pb2_DEVICE, i+1)):
workspace.FeedBlob(p, data)
log.info("Complete parameter broadcast")
def save_model_params(is_checkpoint, model, checkpoint_path, epoch, opts, best_metric):
# best_metric=float('-inf')
if checkpoint_path is None:
return None
try:
save_model_params_blob(
model, checkpoint_path, epoch, opts, best_metric
)
except Exception as e:
log.warning('Exception from save_model_params {}'.format(str(e)))
return checkpoint_path
def save_model_params_blob(model, params_file, epoch, opts, best_metric):
# best_metric=float('-inf')
log.info("Saving model params...")
root_xpu_id = opts['distributed']['first_xpu_id']
device = opts['distributed']['device']
save_params = [str(param) for param in
model.GetParams('{}_{}'.format(device, root_xpu_id))]
save_computed_params = [str(param) for param in
model.GetComputedParams('{}_{}'
.format(device, root_xpu_id))]
save_blobs = {}
save_blobs['epoch'] = epoch
save_blobs['best_metric'] = best_metric
save_blobs['lr'] = \
workspace.FetchBlob('{}_{}/lr'.format(device, root_xpu_id))
for param in save_params + save_computed_params:
scoped_blob_name = str(param)
unscoped_blob_name = unscope_name(scoped_blob_name)
if unscoped_blob_name not in save_blobs:
save_blobs[unscoped_blob_name] = workspace.FetchBlob(
scoped_blob_name)
log.debug(
'{:s} -> {:s}'.format(scoped_blob_name, unscoped_blob_name))
log.info('to weights file {}'.format(params_file))
try:
        with open(params_file, 'wb') as fwrite:
pickle.dump(dict(blobs=save_blobs), fwrite, pickle.HIGHEST_PROTOCOL)
except IOError as e:
log.error('I/O error({0}): {1}'.format(e.errno, e.strerror))
def unscope_name(blob_name):
return blob_name[blob_name.rfind(scope._NAMESCOPE_SEPARATOR) + 1:]
def scoped_name(blob_name):
return scope.CurrentNameScope() + blob_name
|
pytorch-master
|
caffe2/contrib/playground/checkpoint.py
|
from abc import abstractmethod
class Meter(object):
@abstractmethod
def __init__(self, **kwargs):
pass
@abstractmethod
def Reset(self):
pass
@abstractmethod
def Add(self):
pass
@abstractmethod
def Compute(self):
pass
|
pytorch-master
|
caffe2/contrib/playground/meter.py
|
import caffe2.contrib.playground.meter as Meter
from caffe2.python import workspace
import numpy as np
class ComputeTopKAccuracy(Meter.Meter):
    # Python default arguments are evaluated once, when the function is
    # defined, not each time the function is called. This means that if you
    # use a mutable default argument and mutate it, the mutation persists
    # across all future calls to the function.
# def __init__(self, blob_name=['softmax', 'label'], opts=None, topk=1):
def __init__(self, blob_name=None, opts=None, topk=1):
if blob_name is None:
blob_name = ['softmax', 'label']
self.blob_name = blob_name
self.opts = opts
self.topk = topk
self.iter = 0
self.value = 0
def Reset(self):
self.iter = 0
self.value = 0
def Add(self):
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
prefix = '{}_{}/'.format(self.opts['distributed']['device'], idx)
softmax = workspace.FetchBlob(prefix + self.blob_name[0])
labels = workspace.FetchBlob(prefix + self.blob_name[1])
output = np.squeeze(softmax)
target = np.squeeze(labels)
if len(output.shape) == 1:
output = output.reshape((1, output.shape[0]))
else:
assert len(output.shape) == 2, \
'wrong output size (1D or 2D expected)'
assert len(target.shape) == 1, 'wrong target size (1D expected)'
assert output.shape[0] == target.shape[0], \
'target and output do not match'
N = output.shape[0]
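            # Take the top-k class indices per sample and count the rows where
            # the true label appears among them.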
pred = np.argsort(-output, axis=1)[:, :self.topk]
correct = pred.astype(target.dtype) == np.repeat(
target.reshape((N, 1)), [self.topk], axis=1)
self.value += np.sum(correct[:, :self.topk])
self.iter += N
def Compute(self):
result = self.value / self.iter
self.Reset()
return result
|
pytorch-master
|
caffe2/contrib/playground/compute_topk_accuracy.py
|
pytorch-master
|
caffe2/contrib/playground/__init__.py
|
|
from abc import abstractmethod
from caffe2.python import workspace
from caffe2.python import timeout_guard
from caffe2.python import data_parallel_model
from . import checkpoint as checkpoint
from . import ModuleRegister as ModuleRegister
from . import module_map as module_map
# Instantiating the logger outside of distributed operators may trigger errors;
# the logger needs to be created in each individual operator instead.
import os
import inspect
import time
import logging
logging.basicConfig()
log = logging.getLogger("AnyExp")
log.setLevel(logging.DEBUG)
def initOpts(opts):
workspace.GlobalInit(
['caffe2', '--caffe2_log_level=2', '--caffe2_gpu_memory_tracking=0'])
assert (opts['distributed']['num_gpus'] > 0 or
opts['distributed']['num_cpus'] > 0),\
"Need to specify num_gpus or num_cpus to decide which device to use."
trainWithCPU = (opts['distributed']['num_gpus'] == 0)
num_xpus = opts['distributed']['num_cpus'] if \
trainWithCPU else opts['distributed']['num_gpus']
first_xpu = opts['distributed']['first_cpu_id'] if \
trainWithCPU else opts['distributed']['first_gpu_id']
opts['distributed']['device'] = 'cpu' if trainWithCPU else 'gpu'
opts['model_param']['combine_spatial_bn'] =\
trainWithCPU and opts['model_param']['combine_spatial_bn']
opts['distributed']['num_xpus'] = num_xpus
opts['distributed']['first_xpu_id'] = first_xpu
opts['temp_var'] = {}
opts['temp_var']['metrics_output'] = {}
return opts
def initDefaultModuleMap():
registerModuleMap(module_map)
def registerModuleMap(module_map):
ModuleRegister.registerModuleMap(module_map)
def aquireDatasets(opts):
myAquireDataModule = ModuleRegister.getModule(opts['input']['input_name_py'])
return myAquireDataModule.get_input_dataset(opts)
def createTrainerClass(opts):
return ModuleRegister.constructTrainerClass(AnyExpTrainer, opts)
def overrideAdditionalMethods(myTrainerClass, opts):
return ModuleRegister.overrideAdditionalMethods(myTrainerClass, opts)
def initialize_params_from_file(*args, **kwargs):
return checkpoint.initialize_params_from_file(*args, **kwargs)
class AnyExpTrainer(object):
def __init__(self, opts):
import logging
logging.basicConfig()
log = logging.getLogger("AnyExp")
log.setLevel(logging.DEBUG)
self.log = log
self.opts = opts
self.train_dataset = None
self.test_dataset = None
self.train_df = None
self.test_df = None
self.metrics = {}
self.plotsIngredients = []
self.record_epochs = []
self.samples_per_sec = []
self.secs_per_train = []
self.metrics_output = opts['temp_var']['metrics_output']
first_xpu = opts['distributed']['first_xpu_id']
num_xpus = opts['distributed']['num_xpus']
self.xpus = range(first_xpu, first_xpu + num_xpus)
self.total_batch_size = \
self.opts['epoch_iter']['batch_per_device'] * \
self.opts['distributed']['num_xpus'] * \
self.opts['distributed']['num_shards']
self.epoch_iterations = \
self.opts['epoch_iter']['num_train_sample_per_epoch'] // \
self.total_batch_size
if len(opts['input']['datasets']) > 0:
self.train_df = opts['input']['datasets'][0]
if len(opts['input']['datasets']) == 2:
self.test_df = opts['input']['datasets'][1]
        # at this point, the instance of this class becomes many instances
        # running on different machines. Most of their attributes are the same,
# but the shard_ids are different.
self.shard_id = opts['temp_var']['shard_id']
self.start_epoch = opts['temp_var']['start_epoch']
self.epoch = opts['temp_var']['epoch']
self.epochs_to_run = opts['epoch_iter']['num_epochs_per_flow_schedule']
log.info('opts: {}'.format(str(opts)))
@abstractmethod
def get_input_dataset(self, opts):
pass
@abstractmethod
def get_model_input_fun(self):
pass
@abstractmethod
def init_model(self):
pass
def init_metrics(self):
metrics = self.opts['output']['metrics']
for metric in metrics:
meterClass = self.getMeterClass(metric['meter_py'])
# log.info('metric.meter_kargs {}'.format(metric.meter_kargs))
# log.info('type meter_kargs {}'.format(type(metric.meter_kargs)))
meterInstance = meterClass(opts=self.opts, **metric['meter_kargs'])
self.add_metric(metric['name'], meterInstance, metric['is_train'])
def getMeterClass(self, meterName):
return ModuleRegister.getClassFromModule(meterName, meterName)
def add_metric(self, name, calculator, is_train):
metrics = self.metrics
metrics[name] = {}
metrics[name]['calculator'] = calculator
metrics[name]['is_train'] = is_train
metrics[name]['output'] = []
def extendMetricsOutput(self):
metrics_output = self.metrics_output
if not metrics_output:
metrics_output['epochs'] = self.record_epochs
metrics_output['samples_per_sec'] = self.samples_per_sec
metrics_output['secs_per_train'] = self.secs_per_train
for metric, value in self.metrics.items():
metrics_output[metric] = value['output']
else:
metrics_output['epochs'].extend(self.record_epochs)
metrics_output['samples_per_sec'].extend(self.samples_per_sec)
metrics_output['secs_per_train'].extend(self.secs_per_train)
for metric, value in self.metrics.items():
metrics_output[metric].extend(value['output'])
@abstractmethod
def init_plots(self):
pass
def add_plot(self, x, x_title, ys, y_title):
plotsIngredients = self.plotsIngredients
aPlotIngredients = {}
aPlotIngredients['x'] = x
aPlotIngredients['x_title'] = x_title
aPlotIngredients['ys'] = ys
aPlotIngredients['y_title'] = y_title
plotsIngredients.append(aPlotIngredients)
@abstractmethod
def init_logs(self):
pass
def list_of_epochs(self):
iter_end_point = min(self.opts['epoch_iter']['num_epochs'],
self.epoch +
self.opts['epoch_iter']['num_epochs_per_flow_schedule'])
return range(self.epoch, iter_end_point)
def list_of_epoch_iters(self):
return range(0, self.epoch_iterations)
@abstractmethod
def fun_per_epoch_b4RunNet(self, epoch):
pass
@abstractmethod
def fun_per_epoch_aftRunNet(self, epoch):
pass
def checkpoint(self, epoch):
self.model_path = checkpoint.save_model_params(
True, self.train_model, self.gen_checkpoint_path(True, epoch + 1),
epoch + 1, self.opts, float('-inf'))
def gen_checkpoint_path(self, is_checkpoint, epoch):
if (is_checkpoint):
filename = "model_checkpoint_epoch{}.pkl".format(epoch)
else:
filename = "model_final.pkl"
return self.opts['output']['checkpoint_folder'] + filename
# @abstractmethod
# def gen_checkpoint_path(self, is_checkpoint, epoch):
# pass
@abstractmethod
def fun_per_iter_b4RunNet(self, epoch, epoch_iter):
pass
@abstractmethod
def fun_per_iter_aftRunNetB4Test(self, epoch, epoch_iter):
pass
@abstractmethod
def fun_per_iter_aftRunNetAftTest(self, epoch, epoch_iter):
pass
@abstractmethod
def fun_conclude_operator(self, opts):
pass
def createMetricsPlotsModelsOutputs(self):
self.extendMetricsOutput()
self.model_output = self.model_path
@abstractmethod
def assembleAllOutputs(self):
pass
@abstractmethod
def gen_input_builder_fun(self, model, dataset, is_train):
pass
@abstractmethod
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
pass
@abstractmethod
def gen_param_update_builder_fun(self, model, dataset, is_train):
pass
@abstractmethod
def gen_optimizer_fun(self, model, dataset, is_train):
pass
@abstractmethod
def gen_rendezvous_ctx(self, model, dataset, is_train):
pass
@abstractmethod
def run_training_net(self):
pass
@abstractmethod
def run_testing_net(self):
if self.test_model is None:
return
timeout = 2000.0
with timeout_guard.CompleteInTimeOrDie(timeout):
workspace.RunNet(self.test_model.net.Proto().name)
# @abstractmethod
def planning_output(self):
self.init_metrics()
self.init_plots()
self.init_logs()
def prep_data_parallel_models(self):
self.prep_a_data_parallel_model(self.train_model,
self.train_dataset, True)
self.prep_a_data_parallel_model(self.test_model,
self.test_dataset, False)
def prep_a_data_parallel_model(self, model, dataset, is_train):
if model is None:
return
log.info('in prep_a_data_parallel_model')
param_update = \
self.gen_param_update_builder_fun(model, dataset, is_train) \
if self.gen_param_update_builder_fun is not None else None
log.info('in prep_a_data_parallel_model param_update done ')
optimizer = \
self.gen_optimizer_fun(model, dataset, is_train) \
if self.gen_optimizer_fun is not None else None
log.info('in prep_a_data_parallel_model optimizer done ')
max_ops = self.opts['model_param']['max_concurrent_distributed_ops']
data_parallel_model.Parallelize(
model,
input_builder_fun=self.gen_input_builder_fun(model, dataset, is_train),
forward_pass_builder_fun=self.gen_forward_pass_builder_fun(
model, dataset, is_train),
param_update_builder_fun=param_update,
optimizer_builder_fun=optimizer,
devices=self.xpus,
rendezvous=self.gen_rendezvous_ctx(model, dataset, is_train),
broadcast_computed_params=False,
optimize_gradient_memory=self.opts['model_param']['memonger'],
use_nccl=self.opts['model_param']['cuda_nccl'],
max_concurrent_distributed_ops=max_ops,
cpu_device=(self.opts['distributed']['device'] == 'cpu'),
# "shared model" will only keep model parameters for cpu_0 or gpu_0
            # and will cause issues when initializing each gpu_0, gpu_1, gpu_2 ...
# shared_model=(self.opts['distributed']['device'] == 'cpu'),
combine_spatial_bn=self.opts['model_param']['combine_spatial_bn'],
)
log.info('in prep_a_data_parallel_model Parallelize done ')
# log.info("Current blobs in workspace: {}".format(workspace.Blobs()))
workspace.RunNetOnce(model.param_init_net)
log.info('in prep_a_data_parallel_model RunNetOnce done ')
# for op in model.net.Proto().op:
# log.info('op type engine {} {}'.format(op.type, op.engine))
log.info('model.net.Proto() {}'.format(model.net.Proto()))
workspace.CreateNet(model.net)
# for op in model.net.Proto().op:
# log.info('after CreateNet op type engine {} {}'.
# format(op.type, op.engine))
log.info('in prep_a_data_parallel_model CreateNet done ')
def loadCheckpoint(self):
opts = self.opts
previous_checkpoint = opts['temp_var']['checkpoint_model']
pretrained_model = opts['temp_var']['pretrained_model']
num_xpus = opts['distributed']['num_xpus']
if (previous_checkpoint is not None):
if os.path.exists(previous_checkpoint):
log.info('Load previous checkpoint:{}'.format(
previous_checkpoint
))
start_epoch, prev_checkpointed_lr, _best_metric = \
checkpoint.initialize_params_from_file(
model=self.train_model,
weights_file=previous_checkpoint,
num_xpus=num_xpus,
opts=opts,
broadcast_computed_param=True,
reset_epoch=False,
)
elif pretrained_model is not None and os.path.exists(pretrained_model):
log.info("Load pretrained model: {}".format(pretrained_model))
start_epoch, prev_checkpointed_lr, best_metric = \
checkpoint.initialize_params_from_file(
model=self.train_model,
weights_file=pretrained_model,
num_xpus=num_xpus,
opts=opts,
broadcast_computed_param=True,
reset_epoch=opts['model_param']['reset_epoch'],
)
data_parallel_model.FinalizeAfterCheckpoint(self.train_model)
def buildModelAndTrain(self, opts):
log.info('in buildModelAndTrain, trainer_input: {}'.format(str(opts)))
log.info("check type self: {}".format(type(self)))
log.info("check self dir: {}".format(dir(self)))
log.info("check self source: {}".format(self.__dict__))
log.info("check self get_input_dataset methods: {}".
format(inspect.getsource(self.get_input_dataset)))
log.info("check self gen_input_builder_fun method: {}".
format(inspect.getsource(self.gen_input_builder_fun)))
log.info("check self gen_forward_pass_builder_fun method: {}".
format(inspect.getsource(self.gen_forward_pass_builder_fun)))
if self.gen_param_update_builder_fun is not None:
log.info("check self gen_param_update_builder_fun method: {}".
format(inspect.getsource(self.gen_param_update_builder_fun)))
else:
log.info("check self gen_optimizer_fun method: {}".
format(inspect.getsource(self.gen_optimizer_fun)))
log.info("check self assembleAllOutputs method: {}".
format(inspect.getsource(self.assembleAllOutputs)))
log.info("check self prep_data_parallel_models method: {}".
format(inspect.getsource(self.prep_data_parallel_models)))
self.get_model_input_fun()
self.init_model()
self.planning_output()
self.prep_data_parallel_models()
self.loadCheckpoint()
for epoch in self.list_of_epochs():
log.info("start training epoch {}".format(epoch))
self.fun_per_epoch_b4RunNet(epoch)
for epoch_iter in self.list_of_epoch_iters():
self.iter_start_time = time.time()
self.fun_per_iter_b4RunNet(epoch, epoch_iter)
if self.train_model is not None:
self.run_training_net()
self.fun_per_iter_aftRunNetB4Test(epoch, epoch_iter)
self.iter_end_time = time.time()
if (epoch_iter %
opts['epoch_iter']['num_train_iteration_per_test'] == 0):
secs_per_train = (self.iter_end_time - self.iter_start_time)
self.secs_per_train.append(secs_per_train)
sample_trained = self.total_batch_size
samples_per_sec = sample_trained / secs_per_train
self.samples_per_sec.append(samples_per_sec)
self.fract_epoch = (epoch +
float(epoch_iter) / self.epoch_iterations)
self.record_epochs.append(self.fract_epoch)
for key in self.metrics:
metric = self.metrics[key]
if not metric['is_train']:
continue
metric['calculator'].Add()
metric['output'].append(metric['calculator'].Compute())
self.test_loop_start_time = time.time()
for _test_iter in range(0, opts['epoch_iter']['num_test_iter']):
self.run_testing_net()
for key in self.metrics:
metric = self.metrics[key]
if metric['is_train']:
continue
metric['calculator'].Add()
self.test_loop_end_time = time.time()
self.sec_per_test_loop = \
self.test_loop_end_time - self.test_loop_start_time
for metric in self.metrics.values():
if metric['is_train']:
continue
metric['output'].append(metric['calculator'].Compute())
logStr = 'epoch:{}/{} iter:{}/{} secs_per_train:{} '.format(
self.fract_epoch, self.opts['epoch_iter']['num_epochs'],
epoch_iter, self.epoch_iterations, secs_per_train)
logStr += 'samples_per_sec:{} loop {} tests takes {} sec'.format(
samples_per_sec, opts['epoch_iter']['num_test_iter'],
self.sec_per_test_loop)
for metric, value in self.metrics.items():
logStr += ' {}:{} '.format(metric, value['output'][-1])
log.info('Iter Stats: {}'.format(logStr))
self.fun_per_iter_aftRunNetAftTest(epoch, epoch_iter)
self.checkpoint(epoch)
self.fun_per_epoch_aftRunNet(epoch)
self.fun_conclude_operator()
self.createMetricsPlotsModelsOutputs()
return self.assembleAllOutputs()
|
pytorch-master
|
caffe2/contrib/playground/AnyExp.py
|
# Input
import caffe2.contrib.playground.resnetdemo.\
gfs_IN1k as gfs_IN1k # noqa
# model
import caffe2.contrib.playground.resnetdemo.\
IN1k_resnet as IN1k_resnet # noqa
import caffe2.contrib.playground.resnetdemo.\
IN1k_resnet_no_test_model as IN1k_resnet_no_test_model # noqa
# Additional override
import caffe2.contrib.playground.resnetdemo.\
override_no_test_model_no_checkpoint as override_no_test_model_no_checkpoint # noqa
# FORWARD_PASS
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_forward as caffe2_resnet50_default_forward # noqa
import caffe2.contrib.playground.resnetdemo.\
explicit_resnet_forward as explicit_resnet_forward # noqa
# PARAMETER_UPDATE
import caffe2.contrib.playground.resnetdemo.\
caffe2_resnet50_default_param_update as caffe2_resnet50_default_param_update # noqa
import caffe2.contrib.playground.resnetdemo.\
explicit_resnet_param_update as explicit_resnet_param_update # noqa
# RENDEZVOUS
import caffe2.contrib.playground.resnetdemo.\
rendezvous_filestore as rendezvous_filestore # noqa
# OUTPUT
import caffe2.contrib.playground.\
output_generator as output_generator # noqa
# METERS
# for meters, use the class name as your module name in this map
import caffe2.contrib.playground.\
compute_loss as ComputeLoss # noqa
import caffe2.contrib.playground.\
compute_topk_accuracy as ComputeTopKAccuracy # noqa
|
pytorch-master
|
caffe2/contrib/playground/module_map.py
|
from caffe2.python import timeout_guard
def fun_conclude_operator(self):
# Ensure the program exits. This is to "fix" some unknown problems
# causing the job to sometimes get stuck.
timeout_guard.EuthanizeIfNecessary(600.0)
def assembleAllOutputs(self):
output = {}
output['train_model'] = self.train_model
output['test_model'] = self.test_model
output['model'] = self.model_output
output['metrics'] = self.metrics_output
return output
|
pytorch-master
|
caffe2/contrib/playground/output_generator.py
|
import argparse
import json
import os
import caffe2.contrib.playground.AnyExp as AnyExp
import caffe2.contrib.playground.checkpoint as checkpoint
import logging
logging.basicConfig()
log = logging.getLogger("AnyExpOnTerm")
log.setLevel(logging.DEBUG)
def runShardedTrainLoop(opts, myTrainFun):
start_epoch = 0
pretrained_model = opts['model_param']['pretrained_model']
if pretrained_model != '' and os.path.exists(pretrained_model):
# Only want to get start_epoch.
start_epoch, prev_checkpointed_lr, best_metric = \
checkpoint.initialize_params_from_file(
model=None,
weights_file=pretrained_model,
num_xpus=1,
opts=opts,
broadcast_computed_param=True,
reset_epoch=opts['model_param']['reset_epoch'],
)
log.info('start epoch: {}'.format(start_epoch))
pretrained_model = None if pretrained_model == '' else pretrained_model
ret = None
pretrained_model = ""
shard_results = []
for epoch in range(start_epoch,
opts['epoch_iter']['num_epochs'],
opts['epoch_iter']['num_epochs_per_flow_schedule']):
# must support checkpointing, or the multi-schedule loop will always
# start from the initial state
checkpoint_model = None if epoch == start_epoch else ret['model']
pretrained_model = None if epoch > start_epoch else pretrained_model
shard_results = []
# with LexicalContext('epoch{}_gang'.format(epoch),gang_schedule=False):
for shard_id in range(opts['distributed']['num_shards']):
opts['temp_var']['shard_id'] = shard_id
opts['temp_var']['pretrained_model'] = pretrained_model
opts['temp_var']['checkpoint_model'] = checkpoint_model
opts['temp_var']['epoch'] = epoch
opts['temp_var']['start_epoch'] = start_epoch
shard_ret = myTrainFun(opts)
shard_results.append(shard_ret)
ret = None
# always take only the shard_0 return
for shard_ret in shard_results:
if shard_ret is not None:
ret = shard_ret
opts['temp_var']['metrics_output'] = ret['metrics']
break
log.info('ret is: {}'.format(str(ret)))
return ret
def trainFun():
def simpleTrainFun(opts):
trainerClass = AnyExp.createTrainerClass(opts)
trainerClass = AnyExp.overrideAdditionalMethods(trainerClass, opts)
trainer = trainerClass(opts)
return trainer.buildModelAndTrain(opts)
return simpleTrainFun
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Any Experiment training.')
parser.add_argument("--parameters-json", type=json.loads,
help='model options in json format', dest="params")
args = parser.parse_args()
opts = args.params['opts']
opts = AnyExp.initOpts(opts)
log.info('opts is: {}'.format(str(opts)))
AnyExp.initDefaultModuleMap()
opts['input']['datasets'] = AnyExp.aquireDatasets(opts)
# defined this way so that AnyExp.trainFun(opts) can be replaced with
# some other customized training function.
ret = runShardedTrainLoop(opts, trainFun())
log.info('ret is: {}'.format(str(ret)))
|
pytorch-master
|
caffe2/contrib/playground/AnyExpOnTerm.py
|
import inspect
import logging
logging.basicConfig()
log = logging.getLogger("ModuleRegister")
log.setLevel(logging.DEBUG)
MODULE_MAPS = []
def registerModuleMap(module_map):
MODULE_MAPS.append(module_map)
log.info("ModuleRegister get modules from ModuleMap content: {}".
format(inspect.getsource(module_map)))
def constructTrainerClass(myTrainerClass, opts):
log.info("ModuleRegister, myTrainerClass name is {}".
format(myTrainerClass.__name__))
log.info("ModuleRegister, myTrainerClass type is {}".
format(type(myTrainerClass)))
log.info("ModuleRegister, myTrainerClass dir is {}".
format(dir(myTrainerClass)))
myInitializeModelModule = getModule(opts['model']['model_name_py'])
log.info("ModuleRegister, myInitializeModelModule dir is {}".
format(dir(myInitializeModelModule)))
myTrainerClass.init_model = myInitializeModelModule.init_model
myTrainerClass.run_training_net = myInitializeModelModule.run_training_net
myTrainerClass.fun_per_iter_b4RunNet = \
myInitializeModelModule.fun_per_iter_b4RunNet
myTrainerClass.fun_per_epoch_b4RunNet = \
myInitializeModelModule.fun_per_epoch_b4RunNet
myInputModule = getModule(opts['input']['input_name_py'])
log.info("ModuleRegister, myInputModule {} dir is {}".
format(opts['input']['input_name_py'], myInputModule.__name__))
# Override input methods of the myTrainerClass class
myTrainerClass.get_input_dataset = myInputModule.get_input_dataset
myTrainerClass.get_model_input_fun = myInputModule.get_model_input_fun
myTrainerClass.gen_input_builder_fun = myInputModule.gen_input_builder_fun
# myForwardPassModule = GetForwardPassModule(opts)
myForwardPassModule = getModule(opts['model']['forward_pass_py'])
myTrainerClass.gen_forward_pass_builder_fun = \
myForwardPassModule.gen_forward_pass_builder_fun
myParamUpdateModule = getModule(opts['model']['parameter_update_py'])
myTrainerClass.gen_param_update_builder_fun =\
myParamUpdateModule.gen_param_update_builder_fun \
if myParamUpdateModule is not None else None
myOptimizerModule = getModule(opts['model']['optimizer_py'])
myTrainerClass.gen_optimizer_fun = \
myOptimizerModule.gen_optimizer_fun \
if myOptimizerModule is not None else None
myRendezvousModule = getModule(opts['model']['rendezvous_py'])
myTrainerClass.gen_rendezvous_ctx = \
myRendezvousModule.gen_rendezvous_ctx \
if myRendezvousModule is not None else None
# override output module
myOutputModule = getModule(opts['output']['gen_output_py'])
log.info("ModuleRegister, myOutputModule is {}".
format(myOutputModule.__name__))
myTrainerClass.fun_conclude_operator = myOutputModule.fun_conclude_operator
myTrainerClass.assembleAllOutputs = myOutputModule.assembleAllOutputs
return myTrainerClass
def overrideAdditionalMethods(myTrainerClass, opts):
log.info("B4 additional override myTrainerClass source {}".
format(inspect.getsource(myTrainerClass)))
# override any additional modules
myAdditionalOverride = getModule(opts['model']['additional_override_py'])
if myAdditionalOverride is not None:
for funcName, funcValue in inspect.getmembers(myAdditionalOverride,
inspect.isfunction):
setattr(myTrainerClass, funcName, funcValue)
log.info("Aft additional override myTrainerClass's source {}".
format(inspect.getsource(myTrainerClass)))
return myTrainerClass
def getModule(moduleName):
log.info("get module {} from MODULE_MAPS content {}".format(moduleName, str(MODULE_MAPS)))
myModule = None
for ModuleMap in MODULE_MAPS:
log.info("iterate through MODULE_MAPS content {}".
format(str(ModuleMap)))
for name, obj in inspect.getmembers(ModuleMap):
log.info("iterate through MODULE_MAPS a name {}".format(str(name)))
if name == moduleName:
log.info("AnyExp get module {} with source:{}".
format(moduleName, inspect.getsource(obj)))
myModule = obj
return myModule
return None
def getClassFromModule(moduleName, className):
myClass = None
for ModuleMap in MODULE_MAPS:
for name, obj in inspect.getmembers(ModuleMap):
if name == moduleName:
log.info("ModuleRegistry from module {} get class {} of source:{}".
format(moduleName, className, inspect.getsource(obj)))
myClass = getattr(obj, className)
return myClass
return None
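# A minimal usage sketch (assumed, not part of the original module): the demo's
# module_map.py imports each pluggable module under the name that opts refers to,
# and the experiment driver registers that map so getModule() can resolve modules
# by name. The helper below is hypothetical and only illustrates the lookup flow.
def _example_usage():
    import caffe2.contrib.playground.module_map as module_map
    registerModuleMap(module_map)
    # Resolve the default forward-pass module by the name used in opts.
    forward_module = getModule('caffe2_resnet50_default_forward')
    return forward_module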
|
pytorch-master
|
caffe2/contrib/playground/ModuleRegister.py
|
import caffe2.contrib.playground.meter as Meter
from caffe2.python import workspace
class ComputeLoss(Meter.Meter):
def __init__(self, opts=None, blob_name=''):
self.blob_name = blob_name
self.opts = opts
self.iter = 0
self.value = 0
def Reset(self):
self.iter = 0
self.value = 0
def Add(self):
"""Average values of a blob on each gpu"""
value = 0
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
value += workspace.FetchBlob('{}_{}/{}'.
format(self.opts['distributed']['device'], idx, self.blob_name))
self.value += value
self.iter += 1
def Compute(self):
result = self.opts['distributed']['num_shards'] * self.value / self.iter
self.Reset()
return result
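# A worked example of the scaling above (an interpretation, not part of the
# original file): the forward pass scales the per-device loss by
# 1. / num_xpus / num_shards, so summing the blob over the local xpus in Add()
# and multiplying by num_shards in Compute() recovers the unscaled loss,
# averaged over the iterations seen since the last Reset(). E.g. with
# num_shards=2, num_xpus=2 and a raw loss of 0.8, each device blob holds 0.2;
# Add() accumulates 0.4 per iteration and Compute() returns 2 * 0.4 / 1 = 0.8.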
|
pytorch-master
|
caffe2/contrib/playground/compute_loss.py
|
def gen_param_update_builder_fun(self, model, dataset, is_train):
if not is_train:
return None
else:
def add_parameter_update_ops(model):
model.AddWeightDecay(1e-4)
ITER = model.Iter("ITER")
stepsz = int(30 *
self.opts['epoch_iter']['num_train_sample_per_epoch'] /
self.total_batch_size)
LR = model.net.LearningRate(
[ITER],
"lr",
base_lr=self.opts['model_param']['base_learning_rate'],
policy="step",
stepsize=stepsz,
gamma=0.1,
)
params = model.GetParams()
assert(len(params) > 0)
for param in params:
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
# Update param_grad and param_momentum in place
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, LR, param],
[param_grad, param_momentum, param],
momentum=0.9,
nesterov=1
)
return add_parameter_update_ops
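# A worked example of the step schedule above (illustrative numbers, not from
# the original file): with num_train_sample_per_epoch=1281167 (ImageNet-1k) and
# total_batch_size=256, stepsz = int(30 * 1281167 / 256) = 150136 iterations,
# so the learning rate is multiplied by gamma=0.1 roughly every 30 epochs.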
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/caffe2_resnet50_default_param_update.py
|
import logging
logging.basicConfig()
log = logging.getLogger("AnyExp")
log.setLevel(logging.DEBUG)
# For more depths, add the block config here
BLOCK_CONFIG = {
18: (2, 2, 2, 2),
34: (3, 4, 6, 3),
50: (3, 4, 6, 3),
101: (3, 4, 23, 3),
152: (3, 8, 36, 3),
200: (3, 32, 36, 3),
264: (3, 64, 36, 3),
284: (3, 32, 64, 3),
}
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
split = 'train' if is_train else 'test'
opts = self.opts
def model_creator(model, loss_scale):
model, softmax, loss = resnet_imagenet_create_model(
model=model,
data='data',
labels='label',
split=split,
opts=opts,
dataset=dataset,
)
return [loss]
return model_creator
def resnet_imagenet_create_model(model, data, labels, split, opts, dataset):
model_helper = ResNetModelHelper(model, split, opts)
opts_depth = opts['model_param']['num_layer']
engine = opts['model_param']['engine']
log.info(' | ResNet-{} Imagenet'.format(opts_depth))
assert opts_depth in BLOCK_CONFIG.keys(), \
'Block config is not defined for specified model depth. Please check.'
(n1, n2, n3, n4) = BLOCK_CONFIG[opts_depth]
num_features = 2048
residual_block = model_helper.bottleneck_block
if opts_depth in [18, 34]:
num_features = 512
residual_block = model_helper.basic_block
num_classes = 1000
conv_blob = model.Conv(
data, 'conv1', 3, 64, 7, stride=2, pad=3, weight_init=('MSRAFill', {}),
bias_init=('ConstantFill', {'value': 0.}), no_bias=0, engine=engine
)
test_mode = False
if split in ['test', 'val']:
test_mode = True
bn_blob = model.SpatialBN(
conv_blob, 'res_conv1_bn', 64,
# does not appear to affect test_loss performance
# epsilon=1e-3,
epsilon=opts['model_param']['bn_epsilon'],
# momentum=0.1,
momentum=opts['model_param']['bn_momentum'],
is_test=test_mode,
)
relu_blob = model.Relu(bn_blob, bn_blob)
max_pool = model.MaxPool(relu_blob, 'pool1', kernel=3, stride=2, pad=1)
# TODO: This can be further optimized by passing dim_in = features,
# dim_out = features * 4
if opts_depth in [50, 101, 152, 200, 264, 284]:
blob_in, dim_in = model_helper.residual_layer(
residual_block, max_pool, 64, 256, stride=1, num_blocks=n1,
prefix='res2', dim_inner=64
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 512, stride=2, num_blocks=n2,
prefix='res3', dim_inner=128
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 1024, stride=2, num_blocks=n3,
prefix='res4', dim_inner=256
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 2048, stride=2, num_blocks=n4,
prefix='res5', dim_inner=512
)
elif opts_depth in [18, 34]:
blob_in, dim_in = model_helper.residual_layer(
residual_block, max_pool, 64, 64, stride=1, num_blocks=n1,
prefix='res2',
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 128, stride=2, num_blocks=n2,
prefix='res3',
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 256, stride=2, num_blocks=n3,
prefix='res4',
)
blob_in, dim_in = model_helper.residual_layer(
residual_block, blob_in, dim_in, 512, stride=2, num_blocks=n4,
prefix='res5',
)
pool_blob = model.AveragePool(blob_in, 'pool5', kernel=7, stride=1)
loss_scale = 1. / opts['distributed']['num_xpus'] / \
opts['distributed']['num_shards']
loss = None
fc_blob = model.FC(
pool_blob, 'pred', num_features, num_classes,
# does not appear to affect test_loss performance
# weight_init=('GaussianFill', {'std': opts.fc_init_std}),
# bias_init=('ConstantFill', {'value': 0.})
weight_init=None,
bias_init=None)
softmax, loss = model.SoftmaxWithLoss(
[fc_blob, labels],
['softmax', 'loss'],
scale=loss_scale)
model.Accuracy(['softmax', labels], 'accuracy')
return model, softmax, loss
class ResNetModelHelper():
def __init__(self, model, split, opts):
self.model = model
self.split = split
self.opts = opts
self.engine = opts['model_param']['engine']
# shortcut type B
def add_shortcut(self, blob_in, dim_in, dim_out, stride, prefix):
if dim_in == dim_out:
return blob_in
conv_blob = self.model.Conv(
blob_in, prefix, dim_in, dim_out, kernel=1,
stride=stride,
weight_init=("MSRAFill", {}),
bias_init=('ConstantFill', {'value': 0.}), no_bias=1, engine=self.engine
)
test_mode = False
if self.split in ['test', 'val']:
test_mode = True
bn_blob = self.model.SpatialBN(
conv_blob, prefix + "_bn", dim_out,
# epsilon=1e-3,
# momentum=0.1,
epsilon=self.opts['model_param']['bn_epsilon'],
momentum=self.opts['model_param']['bn_momentum'],
is_test=test_mode,
)
return bn_blob
def conv_bn(
self, blob_in, dim_in, dim_out, kernel, stride, prefix, group=1, pad=1,
):
conv_blob = self.model.Conv(
blob_in, prefix, dim_in, dim_out, kernel, stride=stride,
pad=pad, group=group,
weight_init=("MSRAFill", {}),
bias_init=('ConstantFill', {'value': 0.}), no_bias=1, engine=self.engine
)
test_mode = False
if self.split in ['test', 'val']:
test_mode = True
bn_blob = self.model.SpatialBN(
conv_blob, prefix + "_bn", dim_out,
epsilon=self.opts['model_param']['bn_epsilon'],
momentum=self.opts['model_param']['bn_momentum'],
is_test=test_mode,
)
return bn_blob
def conv_bn_relu(
self, blob_in, dim_in, dim_out, kernel, stride, prefix, pad=1, group=1,
):
bn_blob = self.conv_bn(
blob_in, dim_in, dim_out, kernel, stride, prefix, group=group,
pad=pad
)
return self.model.Relu(bn_blob, bn_blob)
# 3(a) this block uses a multi-way group conv implementation that splits blobs
def multiway_bottleneck_block(
self, blob_in, dim_in, dim_out, stride, prefix, dim_inner, group
):
blob_out = self.conv_bn_relu(
blob_in, dim_in, dim_inner, 1, 1, prefix + "_branch2a", pad=0,
)
conv_blob = self.model.GroupConv_Deprecated(
blob_out, prefix + "_branch2b", dim_inner, dim_inner, kernel=3,
stride=stride, pad=1, group=group, weight_init=("MSRAFill", {}),
bias_init=('ConstantFill', {'value': 0.}), no_bias=1, engine=self.engine
)
test_mode = False
if self.split in ['test', 'val']:
test_mode = True
bn_blob = self.model.SpatialBN(
conv_blob, prefix + "_branch2b_bn", dim_out,
epsilon=self.opts['model_param']['bn_epsilon'],
momentum=self.opts['model_param']['bn_momentum'], is_test=test_mode,
)
relu_blob = self.model.Relu(bn_blob, bn_blob)
bn_blob = self.conv_bn(
relu_blob, dim_inner, dim_out, 1, 1, prefix + "_branch2c", pad=0
)
if self.opts['model_param']['custom_bn_init']:
self.model.param_init_net.ConstantFill(
[bn_blob + '_s'], bn_blob + '_s',
value=self.opts['model_param']['bn_init_gamma'])
sc_blob = self.add_shortcut(
blob_in, dim_in, dim_out, stride, prefix=prefix + "_branch1"
)
sum_blob = self.model.net.Sum([bn_blob, sc_blob], prefix + "_sum")
return self.model.Relu(sum_blob, sum_blob)
# 3(c) this block uses cudnn group conv op
def group_bottleneck_block(
self, blob_in, dim_in, dim_out, stride, prefix, dim_inner, group
):
blob_out = self.conv_bn_relu(
blob_in, dim_in, dim_inner, 1, 1, prefix + "_branch2a", pad=0,
)
blob_out = self.conv_bn_relu(
blob_out, dim_inner, dim_inner, 3, stride, prefix + "_branch2b",
group=group
)
bn_blob = self.conv_bn(
blob_out, dim_inner, dim_out, 1, 1, prefix + "_branch2c", pad=0
)
if self.opts['model_param']['custom_bn_init']:
self.model.param_init_net.ConstantFill(
[bn_blob + '_s'], bn_blob + '_s',
value=self.opts['model_param']['bn_init_gamma'])
sc_blob = self.add_shortcut(
blob_in, dim_in, dim_out, stride, prefix=prefix + "_branch1"
)
sum_blob = self.model.net.Sum([bn_blob, sc_blob], prefix + "_sum")
return self.model.Relu(sum_blob, sum_blob)
# bottleneck residual layer for 50, 101, 152 layer networks
def bottleneck_block(
self, blob_in, dim_in, dim_out, stride, prefix, dim_inner, group=None
):
blob_out = self.conv_bn_relu(
blob_in, dim_in, dim_inner, 1, 1, prefix + "_branch2a", pad=0,
)
blob_out = self.conv_bn_relu(
blob_out, dim_inner, dim_inner, 3, stride, prefix + "_branch2b",
)
bn_blob = self.conv_bn(
blob_out, dim_inner, dim_out, 1, 1, prefix + "_branch2c", pad=0
)
if self.opts['model_param']['custom_bn_init']:
self.model.param_init_net.ConstantFill(
[bn_blob + '_s'], bn_blob + '_s',
value=self.opts['model_param']['bn_init_gamma'])
sc_blob = self.add_shortcut(
blob_in, dim_in, dim_out, stride, prefix=prefix + "_branch1"
)
sum_blob = self.model.net.Sum([bn_blob, sc_blob], prefix + "_sum")
return self.model.Relu(sum_blob, sum_blob)
# basic layer for the 18 and 34 layer networks and the CIFAR data networks
def basic_block(
self, blob_in, dim_in, dim_out, stride, prefix, dim_inner=None,
group=None,
):
blob_out = self.conv_bn_relu(
blob_in, dim_in, dim_out, 3, stride, prefix + "_branch2a"
)
bn_blob = self.conv_bn(
blob_out, dim_out, dim_out, 3, 1, prefix + "_branch2b", pad=1
)
sc_blob = self.add_shortcut(
blob_in, dim_in, dim_out, stride, prefix=prefix + "_branch1"
)
sum_blob = self.model.net.Sum([bn_blob, sc_blob], prefix + "_sum")
return self.model.Relu(sum_blob, sum_blob)
def residual_layer(
self, block_fn, blob_in, dim_in, dim_out, stride, num_blocks, prefix,
dim_inner=None, group=None
):
# prefix is something like: res2, res3, etc.
# each res layer has num_blocks stacked
for idx in range(num_blocks):
block_prefix = "{}_{}".format(prefix, idx)
block_stride = 2 if (idx == 0 and stride == 2) else 1
blob_in = block_fn(
blob_in, dim_in, dim_out, block_stride, block_prefix, dim_inner,
group
)
dim_in = dim_out
return blob_in, dim_in
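# A small illustration of how residual_layer() stacks blocks (derived from the
# code above): for ResNet-50, BLOCK_CONFIG[50] = (3, 4, 6, 3), so the 'res2'
# stage builds three bottleneck blocks named res2_0, res2_1 and res2_2. Only
# the first block of a stride-2 stage downsamples, and dim_in is set to dim_out
# after the first block so the remaining blocks keep the same channel count.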
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/explicit_resnet_forward.py
|
import numpy as np
from caffe2.python import workspace, cnn, core
from caffe2.python import timeout_guard
from caffe2.proto import caffe2_pb2
def init_model(self):
train_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet",
use_cudnn=True,
cudnn_exhaustive_search=False
)
self.train_model = train_model
test_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet_test",
use_cudnn=True,
cudnn_exhaustive_search=False,
init_params=False,
)
self.test_model = test_model
self.log.info("Model creation completed")
def fun_per_epoch_b4RunNet(self, epoch):
pass
def fun_per_iter_b4RunNet(self, epoch, epoch_iter):
learning_rate = 0.05
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
caffe2_pb2_device = caffe2_pb2.CUDA if \
self.opts['distributed']['device'] == 'gpu' else \
caffe2_pb2.CPU
with core.DeviceScope(core.DeviceOption(caffe2_pb2_device, idx)):
workspace.FeedBlob(
'{}_{}/lr'.format(self.opts['distributed']['device'], idx),
np.array(learning_rate, dtype=np.float32)
)
def run_training_net(self):
timeout = 2000.0
with timeout_guard.CompleteInTimeOrDie(timeout):
workspace.RunNet(self.train_model.net.Proto().name)
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/IN1k_resnet.py
|
import numpy as np
from caffe2.python import workspace, cnn, core
from caffe2.python import timeout_guard
from caffe2.proto import caffe2_pb2
def init_model(self):
# If cudnn needs to be turned off, several other places
# need to be modified:
# 1. operators need to be constructed with the engine option, like below:
#    conv_blob = model.Conv(...engine=engine)
# 2. when launching the model, set opts['model_param']['engine'] = "" instead of "CUDNN"
# 3. set caffe2_disable_implicit_engine_preference in operator.cc to true
train_model = cnn.CNNModelHelper(
order="NCHW",
name="resnet",
use_cudnn=False,
cudnn_exhaustive_search=False,
)
self.train_model = train_model
# test_model = cnn.CNNModelHelper(
# order="NCHW",
# name="resnet_test",
# use_cudnn=False,
# cudnn_exhaustive_search=False,
# init_params=False,
# )
self.test_model = None
self.log.info("Model creation completed")
def fun_per_epoch_b4RunNet(self, epoch):
pass
def fun_per_iter_b4RunNet(self, epoch, epoch_iter):
learning_rate = 0.05
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
caffe2_pb2_device = caffe2_pb2.CUDA if \
self.opts['distributed']['device'] == 'gpu' else \
caffe2_pb2.CPU
with core.DeviceScope(core.DeviceOption(caffe2_pb2_device, idx)):
workspace.FeedBlob(
'{}_{}/lr'.format(self.opts['distributed']['device'], idx),
np.array(learning_rate, dtype=np.float32)
)
def run_training_net(self):
timeout = 2000.0
with timeout_guard.CompleteInTimeOrDie(timeout):
workspace.RunNet(self.train_model.net.Proto().name)
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/IN1k_resnet_no_test_model.py
|
import caffe2.python.models.resnet as resnet
def gen_forward_pass_builder_fun(self, model, dataset, is_train):
def create_resnet50_model_ops(model, loss_scale):
[softmax, loss] = resnet.create_resnet50(
model,
"data",
num_input_channels=3,
num_labels=1000,
label="label",
)
model.Accuracy([softmax, "label"], "accuracy")
my_loss_scale = 1. / self.opts['distributed']['num_xpus'] / \
self.opts['distributed']['num_shards']
loss = model.Scale(loss, scale=my_loss_scale)
return [loss]
return create_resnet50_model_ops
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/caffe2_resnet50_default_forward.py
|
# Example 1: using gfs as the input source.
def gen_input_builder_fun(self, model, dataset, is_train):
if is_train:
input_path = self.opts['input']['train_input_path']
else:
input_path = self.opts['input']['test_input_path']
reader = model.CreateDB("reader",
db=input_path,
db_type='lmdb',
shard_id=self.shard_id,
num_shards=self.opts['distributed']['num_shards'],)
def AddImageInput(model, reader, batch_size, img_size):
'''
Image input operator that loads data from reader and
applies certain transformations to the images.
'''
data, label = model.ImageInput(
reader,
["data", "label"],
batch_size=batch_size,
use_caffe_datum=True,
mean=128.,
std=128.,
scale=256,
crop=img_size,
mirror=1,
is_test=True
)
data = model.StopGradient(data, data)
def add_image_input(model):
AddImageInput(
model,
reader,
batch_size=self.opts['epoch_iter']['batch_per_device'],
img_size=self.opts['input']['imsize'],
)
return add_image_input
def get_input_dataset(opts):
return []
def get_model_input_fun(self):
pass
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/gfs_IN1k.py
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/__init__.py
|
|
def checkpoint(self, epoch):
self.model_path = None
pass
def prep_data_parallel_models(self):
# only do train_model; no test model is needed here
self.prep_a_data_parallel_model(self.train_model,
self.train_dataset, True)
def run_testing_net(self):
pass
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/override_no_test_model_no_checkpoint.py
|
from caffe2.python import core, workspace
from caffe2.python import dyndep
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
# The rendezvous should NOT be unique for each operator. It should have
# the same run_id on different operators. Say we have two shards:
# both shards create a rendezvous with run_id "aaa_bbb_epoch_09", and this
# rendezvous waits for two shards to join because max_shards is specified
# to be 2. If each shard created a rendezvous with a different run_id,
# each of them would wait on a different rendezvous to join; they would
# never wait for each other and would therefore time out eventually.
def gen_rendezvous_ctx(self, model, dataset, is_train):
if self.opts['distributed']['num_shards'] < 2:
return None
# there are issues when trying to set this up on more shards
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate", [], ["store_handler"],
path="/tmp",
prefix="epoch.{}".format(self.epoch),
)
)
rendezvous = dict(
kv_handler="store_handler",
shard_id=self.shard_id,
num_shards=self.opts['distributed']['num_shards'],
engine="GLOO",
# transport=args.distributed_transport,
transport="tcp",
# interface=interfaces[0],
interface=[],
exit_nets=None) if is_train else None
return rendezvous
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/rendezvous_filestore.py
|
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
def gen_param_update_builder_fun(self, model, dataset, is_train):
if not is_train:
return None
else:
# from sherlok
for idx in range(self.opts['distributed']['first_xpu_id'],
self.opts['distributed']['first_xpu_id'] +
self.opts['distributed']['num_xpus']):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, idx)):
workspace.CreateBlob('{}_{}/lr'.
format(self.opts['distributed']['device'], idx))
def add_parameter_update_ops(model):
model.Iter("ITER")
weight_decay = model.param_init_net.ConstantFill(
[], 'weight_decay', shape=[1],
value=self.opts['model_param']['weight_decay']
)
weight_decay_bn = model.param_init_net.ConstantFill(
[], 'weight_decay_bn', shape=[1],
value=self.opts['model_param']['weight_decay_bn']
)
one = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0
)
'''
Add the momentum-SGD update.
'''
params = model.GetParams()
assert(len(params) > 0)
for param in params:
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
if '_bn' in str(param):
model.WeightedSum(
[param_grad, one, param, weight_decay_bn], param_grad
)
else:
model.WeightedSum(
[param_grad, one, param, weight_decay], param_grad
)
# Update param_grad and param_momentum in place
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, 'lr', param],
[param_grad, param_momentum, param],
momentum=0.9,
nesterov=1
)
return add_parameter_update_ops
|
pytorch-master
|
caffe2/contrib/playground/resnetdemo/explicit_resnet_param_update.py
|
pytorch-master
|
caffe2/contrib/gloo/__init__.py
|
|
#!/usr/bin/env python3
from hypothesis import given, settings
import hypothesis.strategies as st
from multiprocessing import Process, Queue
import numpy as np
import os
import pickle
import tempfile
import shutil
from caffe2.python import core, workspace, dyndep
import caffe2.python.hypothesis_test_util as hu
from gloo.python import IoError
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:file_store_handler_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:redis_store_handler_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops_gpu")
op_engine = 'GLOO'
class TemporaryDirectory:
def __enter__(self):
self.tmpdir = tempfile.mkdtemp()
return self.tmpdir
def __exit__(self, type, value, traceback):
shutil.rmtree(self.tmpdir)
class TestCase(hu.HypothesisTestCase):
test_counter = 0
sync_counter = 0
def run_test_locally(self, fn, device_option=None, **kwargs):
# Queue for assertion errors on subprocesses
queue = Queue()
# Capture any exception thrown by the subprocess
def run_fn(*args, **kwargs):
try:
with core.DeviceScope(device_option):
fn(*args, **kwargs)
workspace.ResetWorkspace()
queue.put(True)
except Exception as ex:
queue.put(ex)
# Start N processes in the background
procs = []
for i in range(kwargs['comm_size']):
kwargs['comm_rank'] = i
proc = Process(
target=run_fn,
kwargs=kwargs)
proc.start()
procs.append(proc)
# Test complete, join background processes
while len(procs) > 0:
proc = procs.pop(0)
while proc.is_alive():
proc.join(10)
# Raise exception if we find any. Otherwise each worker
# should put a True into the queue
# Note that the following is executed ALSO after
# the last process was joined, so if ANY exception
# was raised, it will be re-raised here.
self.assertFalse(queue.empty(), "Job failed without a result")
o = queue.get()
if isinstance(o, Exception):
raise o
else:
self.assertTrue(o)
def run_test_distributed(self, fn, device_option=None, **kwargs):
comm_rank = os.getenv('COMM_RANK')
self.assertIsNotNone(comm_rank)
comm_size = os.getenv('COMM_SIZE')
self.assertIsNotNone(comm_size)
kwargs['comm_rank'] = int(comm_rank)
kwargs['comm_size'] = int(comm_size)
with core.DeviceScope(device_option):
fn(**kwargs)
workspace.ResetWorkspace()
def create_common_world(self, comm_rank, comm_size, tmpdir=None, existing_cw=None):
store_handler = "store_handler"
# If REDIS_HOST is set, use RedisStoreHandler for rendezvous.
if existing_cw is None:
redis_host = os.getenv("REDIS_HOST")
redis_port = int(os.getenv("REDIS_PORT", 6379))
if redis_host is not None:
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate",
[],
[store_handler],
prefix=str(TestCase.test_counter) + "/",
host=redis_host,
port=redis_port))
else:
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate",
[],
[store_handler],
path=tmpdir))
common_world = "common_world"
else:
common_world = str(existing_cw) + ".forked"
if existing_cw is not None:
workspace.RunOperatorOnce(
core.CreateOperator(
"CloneCommonWorld",
[existing_cw],
[common_world],
sync=True,
engine=op_engine))
else:
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateCommonWorld",
[store_handler],
[common_world],
size=comm_size,
rank=comm_rank,
sync=True,
engine=op_engine))
return (store_handler, common_world)
def synchronize(self, store_handler, value, comm_rank=None):
TestCase.sync_counter += 1
blob = "sync_{}".format(TestCase.sync_counter)
if comm_rank == 0:
workspace.FeedBlob(blob, pickle.dumps(value))
workspace.RunOperatorOnce(
core.CreateOperator(
"StoreSet",
[store_handler, blob],
[]))
else:
workspace.RunOperatorOnce(
core.CreateOperator(
"StoreGet",
[store_handler],
[blob]))
return pickle.loads(workspace.FetchBlob(blob))
def _test_broadcast(self,
comm_rank=None,
comm_size=None,
blob_size=None,
num_blobs=None,
tmpdir=None,
use_float16=False,
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
blob_size = self.synchronize(
store_handler,
blob_size,
comm_rank=comm_rank)
num_blobs = self.synchronize(
store_handler,
num_blobs,
comm_rank=comm_rank)
for i in range(comm_size):
blobs = []
for j in range(num_blobs):
blob = "blob_{}".format(j)
offset = (comm_rank * num_blobs) + j
value = np.full(blob_size, offset,
np.float16 if use_float16 else np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("broadcast")
net.Broadcast(
[common_world] + blobs,
blobs,
root=i,
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
for j in range(num_blobs):
np.testing.assert_array_equal(
workspace.FetchBlob(blobs[j]),
i * num_blobs)
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
blob_size=st.integers(min_value=int(1e3), max_value=int(1e6)),
num_blobs=st.integers(min_value=1, max_value=4),
device_option=st.sampled_from([hu.cpu_do]),
use_float16=st.booleans())
@settings(deadline=10000)
def test_broadcast(self, comm_size, blob_size, num_blobs, device_option,
use_float16):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_broadcast,
blob_size=blob_size,
num_blobs=num_blobs,
use_float16=use_float16,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_broadcast,
comm_size=comm_size,
blob_size=blob_size,
num_blobs=num_blobs,
device_option=device_option,
tmpdir=tmpdir,
use_float16=use_float16)
def _test_allreduce(self,
comm_rank=None,
comm_size=None,
blob_size=None,
num_blobs=None,
tmpdir=None,
use_float16=False
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
blob_size = self.synchronize(
store_handler,
blob_size,
comm_rank=comm_rank)
num_blobs = self.synchronize(
store_handler,
num_blobs,
comm_rank=comm_rank)
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(blob_size, (comm_rank * num_blobs) + i,
np.float16 if use_float16 else np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("allreduce")
net.Allreduce(
[common_world] + blobs,
blobs,
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
for i in range(num_blobs):
np.testing.assert_array_equal(
workspace.FetchBlob(blobs[i]),
(num_blobs * comm_size) * (num_blobs * comm_size - 1) / 2)
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
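# Worked example for the expected allreduce value above (a sketch, not part of
# the original test): with comm_size=2 and num_blobs=2 the per-rank inputs hold
# 0, 1 (rank 0) and 2, 3 (rank 1). The Gloo Allreduce reduces across all ranks
# and all listed blobs, so every blob ends up filled with 0 + 1 + 2 + 3 = 6,
# which matches (num_blobs * comm_size) * (num_blobs * comm_size - 1) / 2.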
def _test_allreduce_multicw(self,
comm_rank=None,
comm_size=None,
tmpdir=None
):
_store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
_, common_world2 = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir,
existing_cw=common_world)
blob_size = int(1e4)
num_blobs = 4
for cw in [common_world, common_world2]:
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(blob_size, (comm_rank * num_blobs) + i, np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("allreduce_multicw")
net.Allreduce(
[cw] + blobs,
blobs,
engine=op_engine)
workspace.RunNetOnce(net)
for i in range(num_blobs):
np.testing.assert_array_equal(
workspace.FetchBlob(blobs[i]),
(num_blobs * comm_size) * (num_blobs * comm_size - 1) / 2)
@given(comm_size=st.integers(min_value=2, max_value=8),
blob_size=st.integers(min_value=int(1e3), max_value=int(1e6)),
num_blobs=st.integers(min_value=1, max_value=4),
device_option=st.sampled_from([hu.cpu_do]),
use_float16=st.booleans())
@settings(deadline=10000)
def test_allreduce(self, comm_size, blob_size, num_blobs, device_option,
use_float16):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_allreduce,
blob_size=blob_size,
num_blobs=num_blobs,
use_float16=use_float16,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_allreduce,
comm_size=comm_size,
blob_size=blob_size,
num_blobs=num_blobs,
device_option=device_option,
tmpdir=tmpdir,
use_float16=use_float16)
def _test_reduce_scatter(self,
comm_rank=None,
comm_size=None,
blob_size=None,
num_blobs=None,
tmpdir=None,
use_float16=False
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
blob_size = self.synchronize(
store_handler,
blob_size,
comm_rank=comm_rank)
num_blobs = self.synchronize(
store_handler,
num_blobs,
comm_rank=comm_rank)
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(blob_size, (comm_rank * num_blobs) + i,
np.float16 if use_float16 else np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
# Specify distribution among ranks i.e. number of elements
# scattered/distributed to each process.
recv_counts = np.zeros(comm_size, dtype=np.int32)
remaining = blob_size
chunk_size = (blob_size + comm_size - 1) // comm_size
for i in range(comm_size):
recv_counts[i] = min(chunk_size, remaining)
remaining = remaining - chunk_size if remaining > chunk_size else 0
recv_counts_blob = "recvCounts"
workspace.FeedBlob(recv_counts_blob, recv_counts)
blobs.append(recv_counts_blob)
net = core.Net("reduce_scatter")
net.ReduceScatter(
[common_world] + blobs,
blobs,
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
for i in range(num_blobs):
np.testing.assert_array_equal(
np.resize(workspace.FetchBlob(blobs[i]), recv_counts[comm_rank]),
(num_blobs * comm_size) * (num_blobs * comm_size - 1) / 2)
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
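# Worked example of the recv_counts split computed in _test_reduce_scatter above
# (a sketch, not part of the original test): with blob_size=10 and comm_size=3,
# chunk_size = (10 + 3 - 1) // 3 = 4, so recv_counts = [4, 4, 2] -- the first
# two ranks receive 4 elements each and the last rank receives the remaining 2.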
@given(comm_size=st.integers(min_value=2, max_value=8),
blob_size=st.integers(min_value=int(1e3), max_value=int(1e6)),
num_blobs=st.integers(min_value=1, max_value=4),
device_option=st.sampled_from([hu.cpu_do]),
use_float16=st.booleans())
@settings(deadline=10000)
def test_reduce_scatter(self, comm_size, blob_size, num_blobs,
device_option, use_float16):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_reduce_scatter,
blob_size=blob_size,
num_blobs=num_blobs,
use_float16=use_float16,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_reduce_scatter,
comm_size=comm_size,
blob_size=blob_size,
num_blobs=num_blobs,
device_option=device_option,
tmpdir=tmpdir,
use_float16=use_float16)
def _test_allgather(self,
comm_rank=None,
comm_size=None,
blob_size=None,
num_blobs=None,
tmpdir=None,
use_float16=False
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
blob_size = self.synchronize(
store_handler,
blob_size,
comm_rank=comm_rank)
num_blobs = self.synchronize(
store_handler,
num_blobs,
comm_rank=comm_rank)
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(blob_size, (comm_rank * num_blobs) + i,
np.float16 if use_float16 else np.float32)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("allgather")
net.Allgather(
[common_world] + blobs,
["Gathered"],
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
# create expected output
expected_output = np.array([])
for i in range(comm_size):
for j in range(num_blobs):
value = np.full(blob_size, (i * num_blobs) + j,
np.float16 if use_float16 else np.float32)
expected_output = np.concatenate((expected_output, value))
np.testing.assert_array_equal(
workspace.FetchBlob("Gathered"), expected_output)
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
blob_size=st.integers(min_value=int(1e3), max_value=int(1e6)),
num_blobs=st.integers(min_value=1, max_value=4),
device_option=st.sampled_from([hu.cpu_do]),
use_float16=st.booleans())
@settings(max_examples=10, deadline=None)
def test_allgather(self, comm_size, blob_size, num_blobs, device_option,
use_float16):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_allgather,
blob_size=blob_size,
num_blobs=num_blobs,
use_float16=use_float16,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_allgather,
comm_size=comm_size,
blob_size=blob_size,
num_blobs=num_blobs,
device_option=device_option,
tmpdir=tmpdir,
use_float16=use_float16)
@given(device_option=st.sampled_from([hu.cpu_do]))
@settings(deadline=10000)
def test_forked_cw(self, device_option):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_allreduce_multicw,
device_option=device_option)
else:
# Note: this test exercises the path where we fork a common world.
# We therefore don't need a comm size larger than 2. It used to be
# run with comm_size=8, which causes flaky results in a stress run.
# The flakiness was caused by too many listening sockets being
# created by Gloo context initialization (8 processes times
# 7 sockets times 20-way concurrency, plus TIME_WAIT).
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_allreduce_multicw,
comm_size=2,
device_option=device_option,
tmpdir=tmpdir)
def _test_barrier(
self,
comm_rank=None,
comm_size=None,
tmpdir=None,
):
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank, comm_size=comm_size, tmpdir=tmpdir
)
net = core.Net("barrier")
net.Barrier(
[common_world],
[],
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
# Run the net a few more times to check the operator
# works not just the first time it's called
for _tmp in range(4):
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
device_option=st.sampled_from([hu.cpu_do]))
@settings(deadline=10000)
def test_barrier(self, comm_size, device_option):
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_barrier,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_barrier,
comm_size=comm_size,
device_option=device_option,
tmpdir=tmpdir)
def _test_close_connection(
self,
comm_rank=None,
comm_size=None,
tmpdir=None,
):
'''
One node calls close connection, the others wait on a barrier.
The test checks that all of them eventually exit.
'''
# Caffe's for closers only:
# https://www.youtube.com/watch?v=QMFwFgG9NE8
closer = comm_rank == comm_size // 2
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank, comm_size=comm_size, tmpdir=tmpdir
)
net = core.Net("barrier_or_close")
if not closer:
net.Barrier(
[common_world],
[],
engine=op_engine)
else:
net.DestroyCommonWorld(
[common_world], [common_world], engine=op_engine)
# Sleep a bit to ensure others start the barrier
import time
time.sleep(0.1)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
device_option=st.sampled_from([hu.cpu_do]))
@settings(deadline=10000)
def test_close_connection(self, comm_size, device_option):
import time
start_time = time.time()
TestCase.test_counter += 1
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_close_connection,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_close_connection,
comm_size=comm_size,
device_option=device_option,
tmpdir=tmpdir)
# Check that test finishes quickly because connections get closed.
# This assert used to check that the end to end runtime was less
# than 2 seconds, but this may not always be the case if there
# is significant overhead in starting processes. Ideally, this
# assert is replaced by one that doesn't depend on time but rather
# checks the success/failure status of the barrier that is run.
self.assertLess(time.time() - start_time, 20.0)
def _test_io_error(
self,
comm_rank=None,
comm_size=None,
tmpdir=None,
):
'''
Only one node will participate in allreduce, resulting in an IoError
'''
store_handler, common_world = self.create_common_world(
comm_rank=comm_rank,
comm_size=comm_size,
tmpdir=tmpdir)
if comm_rank == 0:
blob_size = 1000
num_blobs = 1
blobs = []
for i in range(num_blobs):
blob = "blob_{}".format(i)
value = np.full(
blob_size, (comm_rank * num_blobs) + i, np.float32
)
workspace.FeedBlob(blob, value)
blobs.append(blob)
net = core.Net("allreduce")
net.Allreduce(
[common_world] + blobs,
blobs,
engine=op_engine)
workspace.CreateNet(net)
workspace.RunNet(net.Name())
@given(comm_size=st.integers(min_value=2, max_value=8),
device_option=st.sampled_from([hu.cpu_do]))
@settings(deadline=10000)
def test_io_error(self, comm_size, device_option):
TestCase.test_counter += 1
with self.assertRaises(IoError):
if os.getenv('COMM_RANK') is not None:
self.run_test_distributed(
self._test_io_error,
device_option=device_option)
else:
with TemporaryDirectory() as tmpdir:
self.run_test_locally(
self._test_io_error,
comm_size=comm_size,
device_option=device_option,
tmpdir=tmpdir)
if __name__ == "__main__":
import unittest
unittest.main()
|
pytorch-master
|
caffe2/contrib/gloo/gloo_test.py
|
import numpy as np
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2", "--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1"])
GLOW_LOWERED_BATCHNORM = False
def reference_spatialbn_test16(X, scale, bias, mean, var, epsilon, order):
X = X.astype(np.float16)
scale = scale.astype(np.float16)
bias = bias.astype(np.float16)
mean = mean.astype(np.float16)
# var = var.astype(np.float16)
assert(order == "NCHW")
scale = scale[np.newaxis, :, np.newaxis, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
var = var[np.newaxis, :, np.newaxis, np.newaxis]
Y = ((X - mean) * (scale / np.sqrt(var + epsilon).astype(np.float16))) + bias
return Y.astype(np.float32)
# Test the lowered BN op
class BatchnormTest(serial.SerializedTestCase):
# TODO: using hypothesis seed, sweep dimensions
@given(seed=st.integers(0, 65535),
size=st.integers(2, 30),
input_channels=st.integers(2, 40),
batch_size=st.integers(2, 20))
@settings(deadline=datetime.timedelta(seconds=10))
def test_bn(self, seed, size, input_channels, batch_size):
workspace.ResetWorkspace()
np.random.seed(seed)
order = "NCHW"
epsilon = 1e-3
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "scale", "bias", "mean", "var"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y"],
order=order,
is_test=True,
epsilon=epsilon
)
)
if GLOW_LOWERED_BATCHNORM:
refopname = "SpatialBNFakeLoweredFp16NNPI"
else:
refopname = "SpatialBNFakeFp16NNPI"
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred"
pred_net_ref.external_input.extend(["X", "scale", "bias", "mean", "var"])
pred_net_ref.external_output.append("Y")
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
refopname,
["X", "scale", "bias", "mean", "var"],
["Y"],
order=order,
is_test=True,
epsilon=epsilon
)
)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
workspace.FeedBlob("scale", scale)
workspace.FeedBlob("bias", bias)
workspace.FeedBlob("mean", mean)
workspace.FeedBlob("var", var)
# Use for reference to debug
# Y_np = reference_spatialbn_test16(X, scale, bias, mean, var, epsilon, order)
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{"X": [batch_size, input_channels, size, size],
"scale": [input_channels],
"bias": [input_channels],
"mean": [input_channels],
"var": [input_channels]},
debug=True,
adjust_batch=False,
use_onnx=False
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob("Y")
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
if not np.allclose(Y_glow.astype(np.float16), Y_c2.astype(np.float16)):
diff = np.abs(Y_glow - Y_c2).astype(np.float16)
print_test_debug_info(
"bn",
{
"seed": seed,
"scale": scale,
"bias": bias,
"mean": mean,
"var": var,
"Y_np": Y_c2,
"Y_glow": Y_glow,
"diff": diff,
"rowwise_diff": np.max(np.abs(diff), -1)})
assert(0)
|
pytorch-master
|
caffe2/contrib/fakelowp/test/test_batchnorm_nnpi_fp16.py
|
import numpy as np
import unittest
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import datetime
from hypothesis import given, settings
import hypothesis.strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
core.GlobalInit(["caffe2", "--caffe2_log_level=-3", "--glow_global_fp16=1"])
class TestBatchMatMul(serial.SerializedTestCase):
@given(
C=st.integers(min_value=1, max_value=10),
M=st.integers(min_value=1, max_value=50),
K=st.integers(min_value=1, max_value=512),
N=st.integers(min_value=1, max_value=50),
rand_seed=st.integers(0, 65534),
trans_a=st.booleans(),
trans_b=st.booleans(),
run_ints=st.booleans()
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_batch_matmul(self, M, K, N, C, rand_seed, trans_a, trans_b, run_ints):
np.random.seed(rand_seed)
workspace.ResetWorkspace()
batch_dims = [C]
if run_ints:
X = np.random.randint(low=1, high=3, size=((C, M, K))).astype(np.float32)
else:
X = 100 * (np.random.rand(*(batch_dims + [M, K])).astype(np.float32) - 0.5)
if trans_a:
X = X.swapaxes(-1, -2)
if run_ints:
Y = np.random.randint(low=1, high=3, size=((C, K, N))).astype(np.float32)
else:
Y = 100 * (np.random.rand(*(batch_dims + [K, N])).astype(np.float32) - 0.5)
if trans_b:
Y = Y.swapaxes(-1, -2)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "Y"])
pred_net.external_output.append("out")
pred_net.op.add().CopyFrom(
core.CreateOperator(
'BatchMatMul', ['X', 'Y'], 'out', trans_a=trans_a, trans_b=trans_b
)
)
pred_net_ref = core.Net("pred_net_ref")
# Reference updated to fp16 with fp32 accumulation
pred_net_ref.BatchMatMulFP16Acc32Fake(
["X", "Y"], ['out'], trans_a=trans_a, trans_b=trans_b)
print("dims", batch_dims, X.shape, Y.shape)
pred_net_onnxified = onnxifi_caffe2_net(pred_net,
{"X": X.shape, "Y": Y.shape},
debug=True,
adjust_batch=False,
use_onnx=False)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("Y", Y)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(pred_net_ref)
# Run Glow net
workspace.RunNet(pred_net_onnxified.name)
out_glow = workspace.FetchBlob('out')
# Run caffe2 net
workspace.RunNet(pred_net_ref)
out_c2_fakefp16 = workspace.FetchBlob('out')
diff = np.abs(out_c2_fakefp16 - out_glow)
if not np.allclose(out_glow, out_c2_fakefp16):
print_test_debug_info("bmm", {
"seed": rand_seed,
"m": M, "k": K,
"n": N, "X": X.shape, "Y": Y.shape,
"trans_a": trans_a,
"trans_b": trans_b,
"run_ints": run_ints,
"out_glow": out_glow,
"out_c2_fakefp16": out_c2_fakefp16,
"diff": diff
})
assert(0)
if __name__ == "__main__":
unittest.main()
|
pytorch-master
|
caffe2/contrib/fakelowp/test/test_batchmatmul_nnpi_fp16.py
|
import numpy as np
import unittest
# Must happen before importing caffe2.python.*
import caffe2.python.fakelowp.init_shared_libs # noqa
from hypothesis import given, settings
from hypothesis import strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
workspace.GlobalInit(["caffe2", "--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1"])
class SparseLengthsSum4BitFakeNNPIFp16Test(serial.SerializedTestCase):
@given(seed=st.integers(0, 65535))
@settings(deadline=datetime.timedelta(seconds=10))
def test_slws_fused_4bit_rowwise_all_same(self, seed):
np.random.seed(seed)
workspace.ResetWorkspace()
n = 1
m = 2
data = np.ones((n, m)).astype(np.float32) * 0.2 - 0.1
max_segments = 5
max_segment_length = 100
num_lengths = np.random.randint(1, max_segments + 1)
# number of segments to run
lengths = np.random.randint(0, max_segment_length + 1,
size=num_lengths).astype(np.int32)
num_indices = np.sum(lengths)
indices = np.zeros(num_indices, dtype=np.int64)
weights = np.random.uniform(low=-0.5, high=0.5, size=[len(indices)])\
.astype(np.float32)
weights = np.ones(len(indices)).astype(np.float32)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused4BitRowwise",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"])
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused4BitRowwiseFakeFP16NNPI",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
workspace.FeedBlob("data", data)
workspace.RunOperatorOnce(
core.CreateOperator(
"FloatToFused4BitRowwiseQuantized",
['data'],
['quantized_data']
)
)
print("quantized", workspace.FetchBlob("quantized_data"))
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{},
max_batch_size=max_segments,
max_seq_size=max_segment_length,
debug=True,
adjust_batch=True,
use_onnx=False
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("weights", weights)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(ref_net)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
workspace.RunNet(ref_net.name)
Y_c2 = workspace.FetchBlob('Y')
if not np.allclose(Y_c2, Y_glow):
print_test_debug_info(
"slws_fused_4bit_rowwise",
{"seed": seed,
"indices": indices,
"data": data,
"lengths": lengths,
"weights": weights,
"Y_c2": Y_c2,
"Y_glow": Y_glow,
"diff": Y_glow - Y_c2,
"rowwise_diff": (Y_glow - Y_c2)[:, 0]})
assert(0)
@given(
seed=st.integers(0, 65535),
num_rows=st.integers(2, 20),
embedding_dim=st.sampled_from([8, 12, 16, 24, 32, 54, 64, 72, 128]),
batch_size=st.integers(1, 32),
max_weight=st.integers(0, 1),
)
@settings(deadline=datetime.timedelta(seconds=10))
def test_slws_fused_4bit_rowwise(self, seed, num_rows, embedding_dim, batch_size, max_weight):
workspace.ResetWorkspace()
np.random.seed(seed)
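        # Use small-magnitude embedding rows; presumably this keeps the 4-bit
        # quantization error small relative to the fp16 reference.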
data = np.random.rand(num_rows, embedding_dim).astype(np.float32)
data = data * 1e-3
lengths = np.random.choice(np.arange(1, num_rows), batch_size).astype(np.int32)
_indices = []
for length in lengths:
_indices.extend(np.random.choice(np.arange(1, num_rows), length))
indices = np.asarray(_indices).astype(np.int64)
weights = np.random.uniform(
low=0,
high=max_weight,
size=[len(indices)]
).astype(np.float32) - max_weight / 2.0
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"])
pred_net.external_output.append("Y")
pred_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused4BitRowwise",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
ref_net = caffe2_pb2.NetDef()
ref_net.name = "ref"
ref_net.external_input.extend(
["quantized_data", "weights", "indices", "lengths"])
ref_net.external_output.append("Y")
ref_net.op.add().CopyFrom(
core.CreateOperator(
"SparseLengthsWeightedSumFused4BitRowwiseFakeFP16NNPI",
["quantized_data", "weights", "indices", "lengths"],
["Y"],
)
)
workspace.FeedBlob("data", data)
workspace.RunOperatorOnce(
core.CreateOperator(
"FloatToFused4BitRowwiseQuantized",
["data"],
["quantized_data"]
)
)
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
{},
max_batch_size=batch_size,
max_seq_size=np.max(lengths),
debug=True,
adjust_batch=True,
use_onnx=False
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("indices", indices)
workspace.FeedBlob("lengths", lengths)
workspace.FeedBlob("weights", weights)
workspace.CreateNet(pred_net_onnxified)
workspace.CreateNet(ref_net)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob('Y')
workspace.RunNet(ref_net.name)
Y_c2 = workspace.FetchBlob('Y')
if not np.allclose(Y_c2, Y_glow):
print_test_debug_info(
"slws_fused_4bit_rowwise",
{
"seed": seed,
"indices": indices,
"data": data.shape,
"lengths": lengths,
"weights": weights,
"Y_c2": Y_c2.shape,
"Y_glow": Y_glow.shape,
"diff": Y_glow - Y_c2,
"rowwise_diff": (Y_glow - Y_c2)[:, 0]
}
)
assert(0)
if __name__ == '__main__':
unittest.main()
|
pytorch-master
|
caffe2/contrib/fakelowp/test/test_sls_4bit_nnpi_fp16.py
|
import numpy as np
import caffe2.python.fakelowp.init_shared_libs # noqa
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.onnx.onnxifi import onnxifi_caffe2_net
from caffe2.python.fakelowp.test_utils import print_test_debug_info
from hypothesis import given, settings
from hypothesis import strategies as st
import caffe2.python.serialized_test.serialized_test_util as serial
import datetime
core.GlobalInit(["caffe2",
"--glow_global_fp16=1",
"--glow_global_fused_scale_offset_fp16=1",
"--glow_global_force_sls_fp16_accum=1"])
GLOW_LOWERED_BATCHNORM = False  # defined but not referenced in this file
# Test the lowered LayerNorm op
class LayerNorm(serial.SerializedTestCase):
@given(seed=st.integers(0, 65535),
batch_size=st.integers(min_value=1, max_value=50),
size=st.integers(min_value=2, max_value=128),
epsilon=st.floats(min_value=1e-4, max_value=1e-3),
elementwise_affine=st.booleans())
@settings(deadline=datetime.timedelta(seconds=10))
def test_layernorm(self, seed, batch_size, size, epsilon, elementwise_affine):
np.random.seed(seed)
# Reset the workspace
workspace.ResetWorkspace()
axis = 1
dims = np.array(([batch_size, size]))
X = np.random.uniform(size=dims).astype(np.float32) - 0.5
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "gamma", "beta"])
pred_net.external_output.extend(["Y", "mean", "rstd"])
pred_net.op.add().CopyFrom(
core.CreateOperator(
"LayerNorm",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "rstd"],
axis=axis,
epsilon=epsilon,
elementwise_affine=elementwise_affine
)
)
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred_ref"
pred_net_ref.external_input.extend(["X", "gamma", "beta"])
pred_net_ref.external_output.extend(["Y", "mean", "rstd"])
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"LayerNormFakeFP16NNPI",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "rstd"],
axis=axis,
epsilon=epsilon,
elementwise_affine=elementwise_affine
)
)
shape_hits = {"X": X.shape, "gamma": gamma.shape, "beta": beta.shape}
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
shape_hits,
debug=True,
adjust_batch=True,
use_onnx=False
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("gamma", gamma)
workspace.FeedBlob("beta", beta)
workspace.CreateNet(pred_net_ref)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchBlob("Y")
dims1 = np.array(([1, *dims]))
X_glow = X.reshape(dims1)
workspace.FeedBlob("X", X_glow)
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchBlob("Y")
if not np.allclose(Y_glow, Y_c2):
diff_Y = np.abs(Y_glow - Y_c2)
print_test_debug_info(
"layernorm",
{
"seed": seed,
"size": size,
"batch_size": batch_size,
"epsilon": epsilon,
"gamma": gamma,
"beta": beta,
"elementwise_affine": elementwise_affine,
"X": X,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff_Y": diff_Y,
}
)
assert(0)
def _get_scale_zp(self, tensor):
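        # Compute an affine uint8 (scale, zero_point) for the tensor; the scale
        # is rounded through fp16 before use and clamped away from zero.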
tensor_max = np.max(tensor)
tensor_min = min(0, np.min(tensor))
scale = np.float32(np.float16((tensor_max - tensor_min) / 255.0))
if scale < 1e-6:
scale = np.float32(1e-6)
zero_point = 0 - tensor_min / scale
zero_point = int(round(np.clip(zero_point, 0, 255.0)))
return (scale, zero_point)
def _layernorm_transform(self, X):
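        # Reference layer normalization over axis 1: subtract the per-row mean
        # and divide by the per-row standard deviation (no epsilon, no affine).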
mean = np.mean(X, axis=1)
mean_exp = np.outer(mean, np.ones(X.shape[1]))
std = np.std(X, axis=1)
std_exp = np.outer(std, np.ones(X.shape[1]))
Y = (X - mean_exp) / std_exp
return Y
@given(seed=st.integers(0, 65535),
batch_size=st.integers(min_value=1, max_value=50),
size=st.integers(min_value=2, max_value=128),
epsilon=st.floats(min_value=1e-4, max_value=1e-3),
elementwise_affine=st.booleans())
@settings(deadline=datetime.timedelta(seconds=10))
# re-enable when T74553975 gets fixed
def test_fused_ln_quantize(self, seed, batch_size, size, epsilon, elementwise_affine):
np.random.seed(seed)
# Reset the workspace
workspace.ResetWorkspace()
axis = 1
dims = np.array(([batch_size, size]))
X = np.random.uniform(size=dims).astype(np.float32) - 0.5
gamma = np.random.randn(*X.shape[axis:]).astype(np.float32)
beta = np.random.randn(*X.shape[axis:]).astype(np.float32)
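        # Pick the Int8 quantization parameters from a reference LayerNorm of X
        # so both nets are asked to quantize into the same range.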
Y = self._layernorm_transform(X)
scale, zp = self._get_scale_zp(Y)
pred_net = caffe2_pb2.NetDef()
pred_net.name = "pred"
pred_net.external_input.extend(["X", "gamma", "beta"])
pred_net.external_output.extend(["Y_q"])
pred_net.op.add().CopyFrom(
core.CreateOperator(
"LayerNorm",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y", "mean", "rstd"],
axis=axis,
epsilon=epsilon,
elementwise_affine=elementwise_affine
)
)
pred_net.op.add().CopyFrom(
core.CreateOperator(
"Int8Quantize", ["Y"], ["Y_q"], Y_scale=scale, Y_zero_point=zp
)
)
print(pred_net)
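        # The reference net uses the fused LayerNormInt8QuantizeFakeNNPI op,
        # which (by its name) emulates LayerNorm followed by Int8Quantize on NNPI.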
pred_net_ref = caffe2_pb2.NetDef()
pred_net_ref.name = "pred_ref"
pred_net_ref.external_input.extend(["X", "gamma", "beta"])
pred_net_ref.external_output.extend(["Y_q"])
pred_net_ref.op.add().CopyFrom(
core.CreateOperator(
"LayerNormInt8QuantizeFakeNNPI",
["X", "gamma", "beta"] if elementwise_affine else ["X"],
["Y_q", "mean", "rstd"],
axis=axis,
epsilon=epsilon,
elementwise_affine=elementwise_affine,
Y_scale=scale, Y_zero_point=zp
)
)
shape_hits = {"X": X.shape, "gamma": gamma.shape, "beta": beta.shape}
pred_net_onnxified = onnxifi_caffe2_net(
pred_net,
shape_hits,
debug=True,
adjust_batch=True,
use_onnx=False
)
num_onnxified_ops = sum(
1 if o.type == "Onnxifi" else 0 for o in pred_net_onnxified.op)
np.testing.assert_equal(num_onnxified_ops, 1)
workspace.FeedBlob("X", X)
workspace.FeedBlob("gamma", gamma)
workspace.FeedBlob("beta", beta)
workspace.CreateNet(pred_net_ref)
workspace.CreateNet(pred_net_onnxified)
workspace.RunNet(pred_net_ref.name)
Y_c2 = workspace.FetchInt8Blob("Y_q")
workspace.RunNet(pred_net_onnxified.name)
Y_glow = workspace.FetchInt8Blob("Y_q")
if not np.allclose(Y_glow.data, Y_c2.data) or \
Y_glow.scale != Y_c2.scale or Y_glow.zero_point != Y_c2.zero_point:
diff_Y = np.abs(Y_glow.data.astype(np.float32) - Y_c2.data.astype(np.float32))
print_test_debug_info(
"layernorm",
{
"seed": seed,
"size": size,
"batch_size": batch_size,
"epsilon": epsilon,
"gamma": gamma,
"beta": beta,
"elementwise_affine": elementwise_affine,
"X": X,
"Y_glow": Y_glow,
"Y_c2": Y_c2,
"diff_Y": diff_Y,
}
)
assert(0)
|
pytorch-master
|
caffe2/contrib/fakelowp/test/test_layernorm_nnpi_fp16.py
|