// SPDX-License-Identifier: Apache-2.0 #include "gtest/gtest.h" #include "kompute/Kompute.hpp" #include "kompute/logger/Logger.hpp" #include "shaders/Utils.hpp" TEST(TestPushConstants, TestConstantsAlgoDispatchOverride) { { std::string shader(R"( #version 450 layout(push_constant) uniform PushConstants { float x; float y; float z; } pcs; layout (local_size_x = 1) in; layout(set = 0, binding = 0) buffer a { float pa[]; }; void main() { pa[0] += pcs.x; pa[1] += pcs.y; pa[2] += pcs.z; })"); std::vector spirv = compileSource(shader); std::shared_ptr sq = nullptr; { kp::Manager mgr; std::shared_ptr> tensor = mgr.tensor({ 0, 0, 0 }); std::shared_ptr algo = mgr.algorithm( { tensor }, spirv, kp::Workgroup({ 1 }), {}, { 0.0, 0.0, 0.0 }); sq = mgr.sequence()->eval({ tensor }); // We need to run this in sequence to avoid race condition // We can't use atomicAdd as swiftshader doesn't support it for // float sq->eval(algo, std::vector{ 0.1, 0.2, 0.3 }); sq->eval(algo, std::vector{ 0.3, 0.2, 0.1 }); sq->eval({ tensor }); EXPECT_EQ(tensor->vector(), std::vector({ 0.4, 0.4, 0.4 })); } } } TEST(TestPushConstants, TestConstantsAlgoDispatchNoOverride) { { std::string shader(R"( #version 450 layout(push_constant) uniform PushConstants { float x; float y; float z; } pcs; layout (local_size_x = 1) in; layout(set = 0, binding = 0) buffer a { float pa[]; }; void main() { pa[0] += pcs.x; pa[1] += pcs.y; pa[2] += pcs.z; })"); std::vector spirv = compileSource(shader); std::shared_ptr sq = nullptr; { kp::Manager mgr; std::shared_ptr> tensor = mgr.tensor({ 0, 0, 0 }); std::shared_ptr algo = mgr.algorithm( { tensor }, spirv, kp::Workgroup({ 1 }), {}, { 0.1, 0.2, 0.3 }); sq = mgr.sequence()->eval({ tensor }); // We need to run this in sequence to avoid race condition // We can't use atomicAdd as swiftshader doesn't support it for // float sq->eval(algo); sq->eval(algo, std::vector{ 0.3, 0.2, 0.1 }); sq->eval({ tensor }); EXPECT_EQ(tensor->vector(), std::vector({ 0.4, 0.4, 0.4 })); } } } 
// NOTE(review): template-argument lists in these tests had been stripped by
// a text-mangling step and are restored here per the Kompute API — confirm
// against the version of Kompute in use.

// Verifies that recording a dispatch whose push-constant payload does not
// match the size declared at algorithm creation throws std::runtime_error.
TEST(TestPushConstants, TestConstantsWrongSize)
{
    {
        std::string shader(R"(
          #version 450
          layout(push_constant) uniform PushConstants {
            float x;
            float y;
            float z;
          } pcs;
          layout (local_size_x = 1) in;
          layout(set = 0, binding = 0) buffer a { float pa[]; };
          void main() {
              pa[0] += pcs.x;
              pa[1] += pcs.y;
              pa[2] += pcs.z;
          })");

        std::vector<uint32_t> spirv = compileSource(shader);

        std::shared_ptr<kp::Sequence> sq = nullptr;

        {
            kp::Manager mgr;

            std::shared_ptr<kp::TensorT<float>> tensor =
              mgr.tensor({ 0, 0, 0 });

            // Algorithm declared with a SINGLE float push constant...
            std::shared_ptr<kp::Algorithm> algo = mgr.algorithm(
              { tensor }, spirv, kp::Workgroup({ 1 }), {}, { 0.0 });

            sq = mgr.sequence()->record<kp::OpTensorSyncDevice>({ tensor });

            // ...so recording a dispatch with three floats must throw.
            EXPECT_THROW(sq->record<kp::OpAlgoDispatch>(
                           algo, std::vector<float>{ 0.1, 0.2, 0.3 }),
                         std::runtime_error);
        }
    }
}

// TODO: Ensure different types are considered for push constants
// TEST(TestPushConstants, TestConstantsWrongType)
// {
//     {
//         std::string shader(R"(
//           #version 450
//           layout(push_constant) uniform PushConstants {
//             float x;
//             float y;
//             float z;
//           } pcs;
//           layout (local_size_x = 1) in;
//           layout(set = 0, binding = 0) buffer a { float pa[]; };
//           void main() {
//               pa[0] += pcs.x;
//               pa[1] += pcs.y;
//               pa[2] += pcs.z;
//           })");
//
//         std::vector<uint32_t> spirv = compileSource(shader);
//
//         std::shared_ptr<kp::Sequence> sq = nullptr;
//
//         {
//             kp::Manager mgr;
//
//             std::shared_ptr<kp::TensorT<float>> tensor =
//               mgr.tensor({ 0, 0, 0 });
//
//             std::shared_ptr<kp::Algorithm> algo = mgr.algorithm(
//               { tensor }, spirv, kp::Workgroup({ 1 }), {}, { 0.0 });
//
//             sq = mgr.sequence()->record<kp::OpTensorSyncDevice>({ tensor });
//
//             EXPECT_THROW(sq->record<kp::OpAlgoDispatch>(
//                            algo, std::vector<int32_t>{ 1, 2, 3 }),
//                          std::runtime_error);
//         }
//     }
// }

// Verifies that a push-constant struct mixing float, uint and int members
// is laid out and delivered correctly to the shader.
TEST(TestPushConstants, TestConstantsMixedTypes)
{
    {
        std::string shader(R"(
          #version 450
          layout(push_constant) uniform PushConstants {
            float x;
            uint y;
            int z;
          } pcs;
          layout (local_size_x = 1) in;
          layout(set = 0, binding = 0) buffer a { float pa[]; };
          void main() {
              pa[0] += pcs.x;
              pa[1] += pcs.y - 2147483000;
              pa[2] += pcs.z;
          })");

        // Host-side mirror of the shader's PushConstants block.
        struct TestConsts
        {
            float x;
            uint32_t y;
            int32_t z;
        };

        std::vector<uint32_t> spirv = compileSource(shader);

        std::shared_ptr<kp::Sequence> sq = nullptr;

        {
            kp::Manager mgr;

            std::shared_ptr<kp::TensorT<float>> tensor =
              mgr.tensorT<float>({ 0, 0, 0 });

            std::shared_ptr<kp::Algorithm> algo =
              mgr.algorithm<float, TestConsts>(
                { tensor }, spirv, kp::Workgroup({ 1 }), {}, { { 0, 0, 0 } });

            sq = mgr.sequence()->eval<kp::OpTensorSyncDevice>({ tensor });

            // We need to run this in sequence to avoid race condition
            // We can't use atomicAdd as swiftshader doesn't support it for
            // float
            sq->eval<kp::OpAlgoDispatch>(
              algo, std::vector<TestConsts>{ { 15.32, 2147483650, 10 } });
            sq->eval<kp::OpAlgoDispatch>(
              algo, std::vector<TestConsts>{ { 30.32, 2147483650, -3 } });

            sq->eval<kp::OpTensorSyncLocal>({ tensor });

            // x: 15.32+30.32; y: 2*(2147483650-2147483000); z: 10-3.
            EXPECT_EQ(tensor->vector(),
                      std::vector<float>({ 45.64, 1300, 7 }));
        }
    }
}

// Verifies signed-integer push constants, including negative values.
TEST(TestPushConstants, TestConstantsInt)
{
    {
        std::string shader(R"(
          #version 450
          layout(push_constant) uniform PushConstants {
            int x;
            int y;
            int z;
          } pcs;
          layout (local_size_x = 1) in;
          layout(set = 0, binding = 0) buffer a { int pa[]; };
          void main() {
              pa[0] += pcs.x;
              pa[1] += pcs.y;
              pa[2] += pcs.z;
          })");

        std::vector<uint32_t> spirv = compileSource(shader);

        std::shared_ptr<kp::Sequence> sq = nullptr;

        {
            kp::Manager mgr;

            std::shared_ptr<kp::TensorT<int32_t>> tensor =
              mgr.tensorT<int32_t>({ -1, -1, -1 });

            std::shared_ptr<kp::Algorithm> algo =
              mgr.algorithm<float, int32_t>(
                { tensor }, spirv, kp::Workgroup({ 1 }), {}, { { 0, 0, 0 } });

            sq = mgr.sequence()->eval<kp::OpTensorSyncDevice>({ tensor });

            // We need to run this in sequence to avoid race condition
            // We can't use atomicAdd as swiftshader doesn't support it for
            // float
            sq->eval<kp::OpAlgoDispatch>(
              algo, std::vector<int32_t>{ { -1, -1, -1 } });
            sq->eval<kp::OpAlgoDispatch>(
              algo, std::vector<int32_t>{ { -1, -1, -1 } });

            sq->eval<kp::OpTensorSyncLocal>({ tensor });

            // -1 initial plus two dispatches of -1 each.
            EXPECT_EQ(tensor->vector(), std::vector<int32_t>({ -3, -3, -3 }));
        }
    }
}

// Verifies unsigned-integer push constants with values above INT32_MAX.
TEST(TestPushConstants, TestConstantsUnsignedInt)
{
    {
        std::string shader(R"(
          #version 450
          layout(push_constant) uniform PushConstants {
            uint x;
            uint y;
            uint z;
          } pcs;
          layout (local_size_x = 1) in;
          layout(set = 0, binding = 0) buffer a { uint pa[]; };
          void main() {
              pa[0] += pcs.x;
              pa[1] += pcs.y;
              pa[2] += pcs.z;
          })");

        std::vector<uint32_t> spirv = compileSource(shader);

        std::shared_ptr<kp::Sequence> sq = nullptr;

        {
            kp::Manager mgr;

            std::shared_ptr<kp::TensorT<uint32_t>> tensor =
              mgr.tensorT<uint32_t>({ 0, 0, 0 });

            std::shared_ptr<kp::Algorithm> algo =
              mgr.algorithm<float, uint32_t>(
                { tensor }, spirv, kp::Workgroup({ 1 }), {}, { { 0, 0, 0 } });

            sq = mgr.sequence()->eval<kp::OpTensorSyncDevice>({ tensor });

            // We need to run this in sequence to avoid race condition
            // We can't use atomicAdd as swiftshader doesn't support it for
            // float
            sq->eval<kp::OpAlgoDispatch>(
              algo,
              std::vector<uint32_t>{ { 2147483650, 2147483650,
                                       2147483650 } });
            sq->eval<kp::OpAlgoDispatch>(algo,
                                         std::vector<uint32_t>{ { 5, 5, 5 } });

            sq->eval<kp::OpTensorSyncLocal>({ tensor });

            EXPECT_EQ(
              tensor->vector(),
              std::vector<uint32_t>({ 2147483655, 2147483655, 2147483655 }));
        }
    }
}

// Verifies double-precision push constants retain precision beyond float.
TEST(TestPushConstants, TestConstantsDouble)
{
    {
        std::string shader(R"(
          #version 450
          layout(push_constant) uniform PushConstants {
            double x;
            double y;
            double z;
          } pcs;
          layout (local_size_x = 1) in;
          layout(set = 0, binding = 0) buffer a { double pa[]; };
          void main() {
              pa[0] += pcs.x;
              pa[1] += pcs.y;
              pa[2] += pcs.z;
          })");

        std::vector<uint32_t> spirv = compileSource(shader);

        std::shared_ptr<kp::Sequence> sq = nullptr;

        {
            kp::Manager mgr;

            std::shared_ptr<kp::TensorT<double>> tensor =
              mgr.tensorT<double>({ 0, 0, 0 });

            std::shared_ptr<kp::Algorithm> algo =
              mgr.algorithm<float, double>(
                { tensor }, spirv, kp::Workgroup({ 1 }), {}, { { 0, 0, 0 } });

            sq = mgr.sequence()->eval<kp::OpTensorSyncDevice>({ tensor });

            // We need to run this in sequence to avoid race condition
            // We can't use atomicAdd as swiftshader doesn't support it for
            // float
            sq->eval<kp::OpAlgoDispatch>(
              algo,
              std::vector<double>{ { 1.1111222233334444,
                                     2.1111222233334444,
                                     3.1111222233334444 } });
            sq->eval<kp::OpAlgoDispatch>(
              algo,
              std::vector<double>{ { 1.1111222233334444,
                                     2.1111222233334444,
                                     3.1111222233334444 } });

            sq->eval<kp::OpTensorSyncLocal>({ tensor });

            // Each component doubled; exact in double arithmetic.
            EXPECT_EQ(tensor->vector(),
                      std::vector<double>({ 2.2222444466668888,
                                            4.2222444466668888,
                                            6.2222444466668888 }));
        }
    }
}