repo_name stringclasses (10 values) | file_path stringlengths (29–222) | content stringlengths (24–926k) | extention stringclasses (5 values)
---|---|---|---|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/multiple_reductions_buffers.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize inputs and outputs
std::vector<int> data(N);
for (int i = 0; i < N; i++) data[i] = i;
int sum = 0, min = 0, max = 0;
{
//# create buffers
buffer buf_data(data);
buffer buf_sum(&sum, range(1));
buffer buf_min(&min, range(1));
buffer buf_max(&max, range(1));
q.submit([&](handler& h) {
//# create accessors for data and results
accessor acc_data(buf_data, h, read_only);
//# define reduction objects for sum, min, max reduction
auto reduction_sum = reduction(buf_sum, h, plus<>());
auto reduction_min = reduction(buf_min, h, minimum<>());
auto reduction_max = reduction(buf_max, h, maximum<>());
//# parallel_for with multiple reduction objects
h.parallel_for(nd_range<1>{N, B}, reduction_sum, reduction_min, reduction_max, [=](nd_item<1> it, auto& temp_sum, auto& temp_min, auto& temp_max) {
auto i = it.get_global_id();
temp_sum.combine(acc_data[i]);
temp_min.combine(acc_data[i]);
temp_max.combine(acc_data[i]);
});
});
}
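//# exiting the buffer scope copies results back to sum, min and max on the host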
//# compute mid-range
auto mid_range = (min+max)/2.f;
//# print results
std::cout << "Sum = " << sum << "\n";
std::cout << "Min = " << min << "\n";
std::cout << "Max = " << max << "\n";
std::cout << "Mid-Range = " << mid_range << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/sum_subgroup_reduce.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
static constexpr size_t S = 32; // sub_group size
int main() {
//# setup queue with in_order property so the two kernels below execute in order
queue q(property::queue::in_order{});
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# use parallel_for and sub_groups to calculate sum
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item)[[intel::reqd_sub_group_size(S)]] {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# Adds all elements in sub_group using sub_group reduce
int sum_sg = reduce_over_group(sg, data[i], plus<>());
//# write sub_group sum to first location for each sub_group
if (sg.get_local_id()[0] == 0) data[i] = sum_sg;
});
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i+=S){
sum += data[i];
}
data[0] = sum;
}).wait();
std::cout << "Sum = " << data[0] << "\n";
free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/sum_workgroup_reduce.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
//# setup queue with in_order property
queue q(property::queue::in_order{});
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# use parallel_for to calculate sum for work_group using reduce
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item){
auto wg = item.get_group();
auto i = item.get_global_id(0);
//# Adds all elements in work_group using work_group reduce
int sum_wg = reduce_over_group(wg, data[i], plus<>());
//# write work_group sum to first location for each work_group
if (item.get_local_id(0) == 0) data[i] = sum_wg;
});
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i+=B){
sum += data[i];
}
data[0] = sum;
}).wait();
std::cout << "Sum = " << data[0] << "\n";
free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/sum_reduction_buffers.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
std::vector<int> data(N);
for (int i = 0; i < N; i++) data[i] = i;
int sum = 0;
{
//# create buffers for data and sum
buffer buf_data(data);
buffer buf_sum(&sum, range(1));
q.submit([&](handler& h) {
//# create accessors for buffer
accessor acc_data(buf_data, h, read_only);
//# nd-range kernel parallel_for with reduction parameter
h.parallel_for(nd_range<1>{N, B}, reduction(buf_sum, h, plus<>()), [=](nd_item<1> it, auto& temp) {
auto i = it.get_global_id(0);
temp.combine(acc_data[i]);
});
});
}
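//# exiting the buffer scope copies the reduction result back to sum on the host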
std::cout << "Sum = " << sum << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/src/reduction_lab.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
//# setup queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# implicit USM for writing min and max value
int* min = malloc_shared<int>(1, q);
int* max = malloc_shared<int>(1, q);
*min = 0;
*max = 0;
//# STEP 1 : Create reduction objects for computing min and max
auto reduction_min = reduction(min, minimum<>());
auto reduction_max = reduction(max, maximum<>());
//# Reduction Kernel get min and max
q.submit([&](handler& h) {
//# STEP 2 : add parallel_for with reduction objects for min and max
h.parallel_for(nd_range<1>{N, B}, reduction_min, reduction_max, [=](nd_item<1> it, auto& temp_min, auto& temp_max) {
auto i = it.get_global_id(0);
temp_min.combine(data[i]);
temp_max.combine(data[i]);
});
}).wait();
//# STEP 3 : Compute mid_range from min and max
int mid_range = (min[0] + max[0]) / 2;
std::cout << "Mid-Range = " << mid_range << "\n";
free(data, q);
free(min, q);
free(max, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/reduction_custom_operator.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <time.h>
using namespace sycl;
static constexpr size_t N = 256; // global size
static constexpr size_t B = 64; // work-group size
template <typename T, typename I>
struct pair {
bool operator<(const pair& o) const {
// strict ordering: smaller value wins; ties resolved by smaller index
return val < o.val || (val == o.val && idx < o.idx);
}
T val;
I idx;
};
int main() {
//# setup queue with default selector
queue q;
//# initialize input data and result using usm
auto result = malloc_shared<pair<int, int>>(1, q);
auto data = malloc_shared<int>(N, q);
//# initialize input data with random numbers
srand(time(0));
for (int i = 0; i < N; ++i) data[i] = rand() % 256;
std::cout << "Input Data:\n";
for (int i = 0; i < N; i++) std::cout << data[i] << " "; std::cout << "\n\n";
//# custom operator for reduction to find minimum value and its index
pair<int, int> operator_identity = {std::numeric_limits<int>::max(), std::numeric_limits<int>::min()};
*result = operator_identity;
auto reduction_object = reduction(result, operator_identity, minimum<pair<int, int>>());
//# parallel_for with user defined reduction object
q.parallel_for(nd_range<1>{N, B}, reduction_object, [=](nd_item<1> item, auto& temp) {
int i = item.get_global_id(0);
temp.combine({data[i], i});
}).wait();
std::cout << "Minimum value and index = " << result->val << " at " << result->idx << "\n";
free(result, q);
free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/sum_single_task.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
int main() {
//# setup sycl::queue with default device selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# use single_task to add all numbers
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i++){
sum += data[i];
}
data[0] = sum;
}).wait();
std::cout << "Sum = " << data[0] << "\n";
free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/sum_reduction_usm.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
//# setup queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# implicit USM for writing sum value
int* sum = malloc_shared<int>(1, q);
*sum = 0;
//# nd-range kernel parallel_for with reduction parameter
q.parallel_for(nd_range<1>{N, B}, reduction(sum, plus<>()), [=](nd_item<1> it, auto& temp) {
auto i = it.get_global_id(0);
temp.combine(data[i]);
}).wait();
std::cout << "Sum = " << *sum << "\n";
free(data, q);
free(sum, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/sum_work_group.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
//# setup queue with in_order property
queue q(property::queue::in_order{});
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# use parallel_for to calculate sum for each work_group
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item){
size_t index = item.get_global_id(0);
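//# only the first work-item of each work-group sums its B elements serially (naive baseline, no tree reduction)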
if(item.get_local_id(0) == 0 ){
int sum_wg = 0;
for(int i=index; i<index+B; i++){
sum_wg += data[i];
}
data[index] = sum_wg;
}
});
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i+=B){
sum += data[i];
}
data[0] = sum;
}).wait();
std::cout << "Sum = " << data[0] << "\n";
free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/multiple_reductions_buffers.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize inputs and outputs
std::vector<int> data(N);
for (int i = 0; i < N; i++) data[i] = i;
int sum = 0, min = 0, max = 0;
{
//# create buffers
buffer buf_data(data);
buffer buf_sum(&sum, range(1));
buffer buf_min(&min, range(1));
buffer buf_max(&max, range(1));
q.submit([&](handler& h) {
//# create accessors for data and results
accessor acc_data(buf_data, h, read_only);
//# define reduction objects for sum, min, max reduction
auto reduction_sum = reduction(buf_sum, h, plus<>());
auto reduction_min = reduction(buf_min, h, minimum<>());
auto reduction_max = reduction(buf_max, h, maximum<>());
//# parallel_for with multiple reduction objects
h.parallel_for(nd_range<1>{N, B}, reduction_sum, reduction_min, reduction_max, [=](nd_item<1> it, auto& temp_sum, auto& temp_min, auto& temp_max) {
auto i = it.get_global_id();
temp_sum.combine(acc_data[i]);
temp_min.combine(acc_data[i]);
temp_max.combine(acc_data[i]);
});
});
}
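//# exiting the buffer scope copies results back to sum, min and max on the host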
//# print results
std::cout << "Sum = " << sum << "\n";
std::cout << "Min = " << min << "\n";
std::cout << "Max = " << max << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/sum_subgroup_reduce.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
static constexpr size_t S = 32; // sub_group size
int main() {
//# setup queue with in_order property so the two kernels below execute in order
queue q(property::queue::in_order{});
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# use parallel_for and sub_groups to calculate sum
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item)[[intel::reqd_sub_group_size(S)]] {
auto sg = item.get_sub_group();
auto i = item.get_global_id(0);
//# Adds all elements in sub_group using sub_group reduce
int sum_sg = reduce_over_group(sg, data[i], plus<>());
//# write sub_group sum to first location for each sub_group
if (sg.get_local_id()[0] == 0) data[i] = sum_sg;
});
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i+=S){
sum += data[i];
}
data[0] = sum;
}).wait();
std::cout << "Sum = " << data[0] << "\n";
free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/sum_workgroup_reduce.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
//# setup queue with in_order property
queue q(property::queue::in_order{});
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# use parallel_for to calculate sum for work_group using reduce
q.parallel_for(nd_range<1>(N, B), [=](nd_item<1> item){
auto wg = item.get_group();
auto i = item.get_global_id(0);
//# Adds all elements in work_group using work_group reduce
int sum_wg = reduce_over_group(wg, data[i], plus<>());
//# write work_group sum to first location for each work_group
if (item.get_local_id(0) == 0) data[i] = sum_wg;
});
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i+=B){
sum += data[i];
}
data[0] = sum;
}).wait();
std::cout << "Sum = " << data[0] << "\n";
free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/sum_reduction_buffers.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
std::vector<int> data(N);
for (int i = 0; i < N; i++) data[i] = i;
int sum = 0;
{
//# create buffers for data and sum
buffer buf_data(data);
buffer buf_sum(&sum, range(1));
q.submit([&](handler& h) {
//# create accessors for buffer
accessor acc_data(buf_data, h, read_only);
//# nd-range kernel parallel_for with reduction parameter
h.parallel_for(nd_range<1>{N, B}, reduction(buf_sum, h, plus<>()), [=](nd_item<1> it, auto& temp) {
auto i = it.get_global_id(0);
temp.combine(acc_data[i]);
});
});
}
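//# exiting the buffer scope copies the reduction result back to sum on the host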
std::cout << "Sum = " << sum << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/oneapi-essentials-training/08_SYCL_Reduction/lab/reduction_lab.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
using namespace sycl;
static constexpr size_t N = 1024; // global size
static constexpr size_t B = 128; // work-group size
int main() {
//# setup queue with default selector
queue q;
std::cout << "Device : " << q.get_device().get_info<info::device::name>() << "\n";
//# initialize data array using usm
auto data = malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
//# implicit USM for writing min and max value
int* min = malloc_shared<int>(1, q);
int* max = malloc_shared<int>(1, q);
*min = 0;
*max = 0;
//# STEP 1 : Create reduction objects for computing min and max
//# YOUR CODE GOES HERE
//# Reduction Kernel get min and max
q.submit([&](handler& h) {
//# STEP 2 : add parallel_for with reduction objects for min and max
//# YOUR CODE GOES HERE
}).wait();
//# STEP 3 : Compute mid_range from min and max
int mid_range = 0;
//# YOUR CODE GOES HERE
std::cout << "Mid-Range = " << mid_range << "\n";
free(data, q);
free(min, q);
free(max, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/02_Thread_Mapping_and_Occupancy/src/gpu_support.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q;
//# Query for device information
auto device_name = q.get_device().get_info<sycl::info::device::name>();
auto wg_size = q.get_device().get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = q.get_device().get_info<sycl::info::device::sub_group_sizes>();
auto slm_size = q.get_device().get_info<sycl::info::device::local_mem_size>();
std::cout << "Device : " << device_name << "\n";
std::cout << "Max Work-Group Size : " << wg_size << "\n";
std::cout << "Supported Sub-Group Sizes : ";
for (auto s : sg_sizes) std::cout << s << " "; std::cout << "\n";
std::cout << "Local Memory Size : " << slm_size << "\n";
q.submit([&](sycl::handler &h){
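//# launch 112 x 120 x 128 global items in work-groups of (1, 1, 128) with sub-group size 32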
h.parallel_for(sycl::nd_range<3>(sycl::range<3>(112, 120, 128), sycl::range<3>(1, 1, 128)), [=](sycl::nd_item<3> item)[[intel::reqd_sub_group_size(32)]] {
// Kernel Code
});
}).wait();
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/02_Thread_Mapping_and_Occupancy/src/vec_add.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
#define N 13762560
template <int groups, int wg_size, int sg_size>
int VectorAdd(sycl::queue &q, std::vector<int> &a, std::vector<int> &b,
std::vector<int> &sum) {
sycl::range num_items{a.size()};
sycl::buffer a_buf(a);
sycl::buffer b_buf(b);
sycl::buffer sum_buf(sum.data(), num_items);
size_t num_groups = groups;
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(
sycl::nd_range<1>(num_groups * wg_size, wg_size), [=
](sycl::nd_item<1> index) [[intel::reqd_sub_group_size(sg_size)]] {
size_t grp_id = index.get_group()[0];
size_t loc_id = index.get_local_id();
//# each work-group processes a contiguous chunk of N / groups elements (N is divisible by every group count used below)
size_t chunk = N / groups;
size_t start = grp_id * chunk;
size_t end = start + chunk;
for (size_t i = start + loc_id; i < end; i += wg_size) {
sum_acc[i] = a_acc[i] + b_acc[i];
}
});
});
q.wait();
auto end = std::chrono::high_resolution_clock::now().time_since_epoch().count();
std::cout << "VectorAdd<" << groups << "> completed on device - "
<< (end - start) * 1e-9 << " seconds\n";
return 0;
}
int main() {
sycl::queue q;
std::vector<int> a(N), b(N), sum(N);
for (size_t i = 0; i < a.size(); i++){
a[i] = i;
b[i] = i;
sum[i] = 0;
}
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
VectorAdd<1,256,32>(q, a, b, sum);
VectorAdd<2,256,32>(q, a, b, sum);
VectorAdd<3,256,32>(q, a, b, sum);
VectorAdd<4,256,32>(q, a, b, sum);
VectorAdd<5,256,32>(q, a, b, sum);
VectorAdd<6,256,32>(q, a, b, sum);
VectorAdd<7,256,32>(q, a, b, sum);
VectorAdd<8,256,32>(q, a, b, sum);
VectorAdd<12,256,32>(q, a, b, sum);
VectorAdd<16,256,32>(q, a, b, sum);
VectorAdd<20,256,32>(q, a, b, sum);
VectorAdd<24,256,32>(q, a, b, sum);
VectorAdd<28,256,32>(q, a, b, sum);
VectorAdd<32,256,32>(q, a, b, sum);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/02_Thread_Mapping_and_Occupancy/lab/gpu_support.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q;
//# Query for device information
auto device_name = q.get_device().get_info<sycl::info::device::name>();
auto wg_size = q.get_device().get_info<sycl::info::device::max_work_group_size>();
auto sg_sizes = q.get_device().get_info<sycl::info::device::sub_group_sizes>();
auto slm_size = q.get_device().get_info<sycl::info::device::local_mem_size>();
std::cout << "Device : " << device_name << "\n";
std::cout << "Max Work-Group Size : " << wg_size << "\n";
std::cout << "Supported Sub-Group Sizes : ";
for (auto s : sg_sizes) std::cout << s << " "; std::cout << "\n";
std::cout << "Local Memory Size : " << slm_size << "\n";
q.submit([&](sycl::handler &h){
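//# launch 112 x 120 x 128 global items in work-groups of (1, 1, 128) with sub-group size 32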
h.parallel_for(sycl::nd_range<3>(sycl::range<3>(112, 120, 128), sycl::range<3>(1, 1, 128)), [=](sycl::nd_item<3> item)[[intel::reqd_sub_group_size(32)]] {
// Kernel Code
});
}).wait();
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/02_Thread_Mapping_and_Occupancy/lab/vec_add.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
#define N 13762560
template <int groups, int wg_size, int sg_size>
int VectorAdd(sycl::queue &q, std::vector<int> &a, std::vector<int> &b,
std::vector<int> &sum) {
sycl::range num_items{a.size()};
sycl::buffer a_buf(a);
sycl::buffer b_buf(b);
sycl::buffer sum_buf(sum.data(), num_items);
size_t num_groups = groups;
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(
sycl::nd_range<1>(num_groups * wg_size, wg_size), [=
](sycl::nd_item<1> index) [[intel::reqd_sub_group_size(sg_size)]] {
size_t grp_id = index.get_group()[0];
size_t loc_id = index.get_local_id();
//# each work-group processes a contiguous chunk of N / groups elements (N is divisible by every group count used below)
size_t chunk = N / groups;
size_t start = grp_id * chunk;
size_t end = start + chunk;
for (size_t i = start + loc_id; i < end; i += wg_size) {
sum_acc[i] = a_acc[i] + b_acc[i];
}
});
});
q.wait();
auto end = std::chrono::high_resolution_clock::now().time_since_epoch().count();
std::cout << "VectorAdd<" << groups << "> completed on device - "
<< (end - start) * 1e-9 << " seconds\n";
return 0;
}
int main() {
sycl::queue q;
std::vector<int> a(N), b(N), sum(N);
for (size_t i = 0; i < a.size(); i++){
a[i] = i;
b[i] = i;
sum[i] = 0;
}
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
VectorAdd<1,256,32>(q, a, b, sum);
VectorAdd<2,256,32>(q, a, b, sum);
VectorAdd<3,256,32>(q, a, b, sum);
VectorAdd<4,256,32>(q, a, b, sum);
VectorAdd<5,256,32>(q, a, b, sum);
VectorAdd<6,256,32>(q, a, b, sum);
VectorAdd<7,256,32>(q, a, b, sum);
VectorAdd<8,256,32>(q, a, b, sum);
VectorAdd<12,256,32>(q, a, b, sum);
VectorAdd<16,256,32>(q, a, b, sum);
VectorAdd<20,256,32>(q, a, b, sum);
VectorAdd<24,256,32>(q, a, b, sum);
VectorAdd<28,256,32>(q, a, b, sum);
VectorAdd<32,256,32>(q, a, b, sum);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/15_Implicit_Explicit_Scaling/src/sub_device.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main(){
sycl::queue q;
sycl::device RootDevice = q.get_device();
std::cout << "Device: " << RootDevice.get_info<sycl::info::device::name>() << "\n";
std::cout << "-EUs : " << RootDevice.get_info<sycl::info::device::max_compute_units>() << "\n\n";
//# Check if GPU can be partitioned (Stack)
auto partitions = RootDevice.get_info<sycl::info::device::partition_max_sub_devices>();
if(partitions > 0){
std::cout << "-partition_max_sub_devices: " << partitions << "\n\n";
std::vector<sycl::device> SubDevices = RootDevice.create_sub_devices<
sycl::info::partition_property::partition_by_affinity_domain>(
sycl::info::partition_affinity_domain::numa);
for (auto &SubDevice : SubDevices) {
std::cout << "Sub-Device: " << SubDevice.get_info<sycl::info::device::name>() << "\n";
std::cout << "-EUs : " << SubDevice.get_info<sycl::info::device::max_compute_units>() << "\n";
}
}
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/15_Implicit_Explicit_Scaling/src/vectoradd_explicit_scaling.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <iostream>
#include <limits>
#include <string>
using namespace sycl;
constexpr int num_runs = 10;
constexpr size_t scalar = 3;
cl_ulong triad(size_t array_size) {
cl_ulong min_time_ns0 = std::numeric_limits<cl_ulong>::max();
cl_ulong min_time_ns1 = std::numeric_limits<cl_ulong>::max();
device dev = device(gpu_selector());
std::vector<device> subdev = {};
subdev = dev.create_sub_devices<sycl::info::partition_property::
partition_by_affinity_domain>(sycl::info::partition_affinity_domain::numa);
queue q[2] = {queue(subdev[0], property::queue::enable_profiling{}),
queue(subdev[1], property::queue::enable_profiling{})};
std::cout << "Running on device: " <<
q[0].get_device().get_info<info::device::name>() << "\n";
std::cout << "Running on device: " <<
q[1].get_device().get_info<info::device::name>() << "\n";
//# malloc_shared takes an element count, not a byte count
double *A0 = malloc_shared<double>(array_size/2, q[0]);
double *B0 = malloc_shared<double>(array_size/2, q[0]);
double *C0 = malloc_shared<double>(array_size/2, q[0]);
double *A1 = malloc_shared<double>(array_size/2, q[1]);
double *B1 = malloc_shared<double>(array_size/2, q[1]);
double *C1 = malloc_shared<double>(array_size/2, q[1]);
for ( int i = 0; i < array_size/2; i++) {
A0[i]= 1.0; B0[i]= 2.0; C0[i]= 0.0;
A1[i]= 1.0; B1[i]= 2.0; C1[i]= 0.0;
}
for (int i = 0; i< num_runs; i++) {
auto q0_event = q[0].submit([&](handler& h) {
h.parallel_for(array_size/2, [=](id<1> idx) {
C0[idx] = A0[idx] + B0[idx] * scalar;
});
});
auto q1_event = q[1].submit([&](handler& h) {
h.parallel_for(array_size/2, [=](id<1> idx) {
C1[idx] = A1[idx] + B1[idx] * scalar;
});
});
q[0].wait();
q[1].wait();
cl_ulong exec_time_ns0 =
q0_event.get_profiling_info<info::event_profiling::command_end>() -
q0_event.get_profiling_info<info::event_profiling::command_start>();
std::cout << "Tile-0 Execution time (iteration " << i << ") [sec]: "
<< (double)exec_time_ns0 * 1.0E-9 << "\n";
min_time_ns0 = std::min(min_time_ns0, exec_time_ns0);
cl_ulong exec_time_ns1 =
q1_event.get_profiling_info<info::event_profiling::command_end>() -
q1_event.get_profiling_info<info::event_profiling::command_start>();
std::cout << "Tile-1 Execution time (iteration " << i << ") [sec]: "
<< (double)exec_time_ns1 * 1.0E-9 << "\n";
min_time_ns1 = std::min(min_time_ns1, exec_time_ns1);
}
// Check correctness
bool error = false;
for ( int i = 0; i < array_size/2; i++) {
if ((C0[i] != A0[i] + scalar * B0[i]) || (C1[i] != A1[i] + scalar * B1[i])) {
std::cout << "\nResult incorrect (element " << i << " is " << C0[i] << ")!\n";
error = true;
}
}
sycl::free(A0, q[0]);
sycl::free(B0, q[0]);
sycl::free(C0, q[0]);
sycl::free(A1, q[1]);
sycl::free(B1, q[1]);
sycl::free(C1, q[1]);
if (error) return -1;
std::cout << "Results are correct!\n\n";
return std::max(min_time_ns0, min_time_ns1);
}
int main(int argc, char *argv[]) {
size_t array_size;
if (argc > 1 ) {
array_size = std::stoi(argv[1]);
}
else {
std::cout << "Run as ./<progname> <arraysize in elements>\n";
return 1;
}
std::cout << "Running with stream size of " << array_size
<< " elements (" << (array_size * sizeof(double))/(double)1024/1024 << "MB)\n";
cl_ulong min_time = triad(array_size);
if (min_time == -1) return 1;
size_t triad_bytes = 3 * sizeof(double) * array_size;
std::cout << "Triad Bytes: " << triad_bytes << "\n";
std::cout << "Time in sec (fastest run): " << min_time * 1.0E-9 << "\n";
double triad_bandwidth = 1.0E-09 * triad_bytes/(min_time*1.0E-9);
std::cout << "Bandwidth of fastest run in GB/s: " << triad_bandwidth << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/15_Implicit_Explicit_Scaling/lab/sub_device.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main(){
sycl::queue q;
sycl::device RootDevice = q.get_device();
std::cout << "Device: " << RootDevice.get_info<sycl::info::device::name>() << "\n";
std::cout << "-EUs : " << RootDevice.get_info<sycl::info::device::max_compute_units>() << "\n\n";
//# Check if GPU can be partitioned (Stack)
auto partitions = RootDevice.get_info<sycl::info::device::partition_max_sub_devices>();
if(partitions > 0){
std::cout << "-partition_max_sub_devices: " << partitions << "\n\n";
std::vector<sycl::device> SubDevices = RootDevice.create_sub_devices<
sycl::info::partition_property::partition_by_affinity_domain>(
sycl::info::partition_affinity_domain::numa);
for (auto &SubDevice : SubDevices) {
std::cout << "Sub-Device: " << SubDevice.get_info<sycl::info::device::name>() << "\n";
std::cout << "-EUs : " << SubDevice.get_info<sycl::info::device::max_compute_units>() << "\n";
}
}
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/15_Implicit_Explicit_Scaling/lab/vectoradd_explicit_scaling.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <CL/sycl.hpp>
#include <algorithm>
#include <cassert>
#include <cfloat>
#include <iostream>
#include <limits>
#include <string>
using namespace sycl;
constexpr int num_runs = 10;
constexpr size_t scalar = 3;
cl_ulong triad(size_t array_size) {
cl_ulong min_time_ns0 = std::numeric_limits<cl_ulong>::max();
cl_ulong min_time_ns1 = std::numeric_limits<cl_ulong>::max();
device dev = device(gpu_selector());
std::vector<device> subdev = {};
subdev = dev.create_sub_devices<sycl::info::partition_property::
partition_by_affinity_domain>(sycl::info::partition_affinity_domain::numa);
queue q[2] = {queue(subdev[0], property::queue::enable_profiling{}),
queue(subdev[1], property::queue::enable_profiling{})};
std::cout << "Running on device: " <<
q[0].get_device().get_info<info::device::name>() << "\n";
std::cout << "Running on device: " <<
q[1].get_device().get_info<info::device::name>() << "\n";
//# malloc_shared takes an element count, not a byte count
double *A0 = malloc_shared<double>(array_size/2, q[0]);
double *B0 = malloc_shared<double>(array_size/2, q[0]);
double *C0 = malloc_shared<double>(array_size/2, q[0]);
double *A1 = malloc_shared<double>(array_size/2, q[1]);
double *B1 = malloc_shared<double>(array_size/2, q[1]);
double *C1 = malloc_shared<double>(array_size/2, q[1]);
for ( int i = 0; i < array_size/2; i++) {
A0[i]= 1.0; B0[i]= 2.0; C0[i]= 0.0;
A1[i]= 1.0; B1[i]= 2.0; C1[i]= 0.0;
}
for (int i = 0; i< num_runs; i++) {
auto q0_event = q[0].submit([&](handler& h) {
h.parallel_for(array_size/2, [=](id<1> idx) {
C0[idx] = A0[idx] + B0[idx] * scalar;
});
});
auto q1_event = q[1].submit([&](handler& h) {
h.parallel_for(array_size/2, [=](id<1> idx) {
C1[idx] = A1[idx] + B1[idx] * scalar;
});
});
q[0].wait();
q[1].wait();
cl_ulong exec_time_ns0 =
q0_event.get_profiling_info<info::event_profiling::command_end>() -
q0_event.get_profiling_info<info::event_profiling::command_start>();
std::cout << "Tile-0 Execution time (iteration " << i << ") [sec]: "
<< (double)exec_time_ns0 * 1.0E-9 << "\n";
min_time_ns0 = std::min(min_time_ns0, exec_time_ns0);
cl_ulong exec_time_ns1 =
q1_event.get_profiling_info<info::event_profiling::command_end>() -
q1_event.get_profiling_info<info::event_profiling::command_start>();
std::cout << "Tile-1 Execution time (iteration " << i << ") [sec]: "
<< (double)exec_time_ns1 * 1.0E-9 << "\n";
min_time_ns1 = std::min(min_time_ns1, exec_time_ns1);
}
// Check correctness
bool error = false;
for ( int i = 0; i < array_size/2; i++) {
if ((C0[i] != A0[i] + scalar * B0[i]) || (C1[i] != A1[i] + scalar * B1[i])) {
std::cout << "\nResult incorrect (element " << i << " is " << C0[i] << ")!\n";
error = true;
}
}
sycl::free(A0, q[0]);
sycl::free(B0, q[0]);
sycl::free(C0, q[0]);
sycl::free(A1, q[1]);
sycl::free(B1, q[1]);
sycl::free(C1, q[1]);
if (error) return -1;
std::cout << "Results are correct!\n\n";
return std::max(min_time_ns0, min_time_ns1);
}
int main(int argc, char *argv[]) {
size_t array_size;
if (argc > 1 ) {
array_size = std::stoi(argv[1]);
}
else {
std::cout << "Run as ./<progname> <arraysize in elements>\n";
return 1;
}
std::cout << "Running with stream size of " << array_size
<< " elements (" << (array_size * sizeof(double))/(double)1024/1024 << "MB)\n";
cl_ulong min_time = triad(array_size);
if (min_time == -1) return 1;
size_t triad_bytes = 3 * sizeof(double) * array_size;
std::cout << "Triad Bytes: " << triad_bytes << "\n";
std::cout << "Time in sec (fastest run): " << min_time * 1.0E-9 << "\n";
double triad_bandwidth = 1.0E-09 * triad_bytes/(min_time*1.0E-9);
std::cout << "Bandwidth of fastest run in GB/s: " << triad_bandwidth << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/src/reduction_sg.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0;
int work_group_size = 256;
int log2elements_per_work_item = 6;
int elements_per_work_item = (1 << log2elements_per_work_item); // 256
int num_work_items = data.size() / elements_per_work_item;
int num_work_groups = num_work_items / work_group_size;
std::cout << "Num work items = " << num_work_items << std::endl;
std::cout << "Num work groups = " << num_work_groups << std::endl;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
sycl::buffer<sycl::vec<int, 8>> accum_buf(num_work_groups);
auto e = q.submit([&](auto &h) {
const sycl::accessor buf_acc(buf, h);
sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
sycl::local_accessor<sycl::vec<int, 8>, 1> scratch(work_group_size, h);
h.parallel_for(
sycl::nd_range<1>{num_work_items, work_group_size}, [=
](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(16)]] {
size_t glob_id = item.get_global_id(0);
size_t group_id = item.get_group(0);
size_t loc_id = item.get_local_id(0);
sycl::sub_group sg = item.get_sub_group();
sycl::vec<int, 8> sum{0, 0, 0, 0, 0, 0, 0, 0};
using global_ptr =
sycl::multi_ptr<int, sycl::access::address_space::global_space>;
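// with sub-group size 16 and vec<int, 8>, each blocked load covers 8 * 16 = 128 consecutive ints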
int base = (group_id * work_group_size +
sg.get_group_id()[0] * sg.get_local_range()[0]) *
elements_per_work_item;
for (size_t i = 0; i < elements_per_work_item / 8; i++)
sum += sg.load<8>(global_ptr(&buf_acc[base + i * 128]));
scratch[loc_id] = sum;
for (int i = work_group_size / 2; i > 0; i >>= 1) {
sycl::group_barrier(item.get_group());
if (loc_id < i)
scratch[loc_id] += scratch[loc_id + i];
}
if (loc_id == 0)
accum_acc[group_id] = scratch[0];
});
});
q.wait();
{
sycl::host_accessor h_acc(accum_buf);
sycl::vec<int, 8> res{0, 0, 0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_work_groups; i++)
res += h_acc[i];
sum = 0;
for (int i = 0; i < 8; i++)
sum += res[i];
}
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/src/reduction_slm.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0;
int work_group_size = 256;
int log2elements_per_block = 13;
int elements_per_block = (1 << log2elements_per_block); // 8192
int log2workitems_per_block = 8;
int workitems_per_block = (1 << log2workitems_per_block); // 256
int elements_per_work_item = elements_per_block / workitems_per_block;
int mask = ~(~0 << log2workitems_per_block);
int num_work_items = data.size() / elements_per_work_item;
int num_work_groups = num_work_items / work_group_size;
std::cout << "Num work items = " << num_work_items << std::endl;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
sycl::buffer<int> accum_buf(num_work_groups);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
sycl::local_accessor<int, 1> scratch(work_group_size, h);
h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size},
[=](sycl::nd_item<1> item) {
size_t glob_id = item.get_global_id(0);
size_t group_id = item.get_group(0);
size_t loc_id = item.get_local_id(0);
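// block-strided offset: consecutive work-items touch consecutive addresses, keeping global loads coalesced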
int offset = ((glob_id >> log2workitems_per_block)
<< log2elements_per_block) +
(glob_id & mask);
int sum = 0;
for (size_t i = 0; i < elements_per_work_item; i++)
sum +=
buf_acc[(i << log2workitems_per_block) + offset];
scratch[loc_id] = sum;
// Serial Reduction
sycl::group_barrier(item.get_group());
if (loc_id == 0) {
int sum = 0;
for (int i = 0; i < work_group_size; i++)
sum += scratch[i];
accum_acc[group_id] = sum;
}
});
});
q.wait();
{
sum = 0;
sycl::host_accessor h_acc(accum_buf);
for (int i = 0; i < num_work_groups; i++)
sum += h_acc[i];
}
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/src/reduction_sycl.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0; // the reduction combines into the buffer's initial value
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
auto sum_reduction = sycl::reduction(sum_buf, h, sycl::plus<>());
h.parallel_for(sycl::nd_range<1>{N, 256}, sum_reduction,
[=](sycl::nd_item<1> item, auto &sum_wg) {
int i = item.get_global_id(0);
sum_wg += buf_acc[i];
});
});
sycl::host_accessor h_acc(sum_buf);
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/src/reduction_atomics.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(N, [=](auto index) {
size_t glob_id = index[0];
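// relaxed, device-scoped atomic add into the single global accumulator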
auto v = sycl::atomic_ref<int,
sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>(sum_acc[0]);
v.fetch_add(buf_acc[glob_id]);
});
});
sycl::host_accessor h_acc(sum_buf);
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/src/reduction-tree-vectorize.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0; // accumulated on the host below
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
int num_processing_elements =
q.get_device().get_info<sycl::info::device::max_compute_units>();
int vec_size =
q.get_device().get_info<sycl::info::device::native_vector_width_int>();
int num_work_items = num_processing_elements * vec_size;
std::cout << "Num work items = " << num_work_items << std::endl;
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> accum_buf(num_work_items);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_work_items, [=](auto index) {
size_t glob_id = index[0];
int sum = 0;
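// stride by the total number of work-items so neighboring work-items read adjacent (coalesced) elements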
for (size_t i = glob_id; i < N; i += num_work_items)
sum += buf_acc[i];
accum_acc[glob_id] = sum;
});
});
sycl::host_accessor h_acc(accum_buf);
for (int i = 0; i < num_work_items; i++) sum += h_acc[i];
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/src/reduction_sycl_blocks.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0; // the reduction combines into the buffer's initial value
int work_group_size = 256;
int log2elements_per_block = 13;
int elements_per_block = (1 << log2elements_per_block); // 8192
int log2workitems_per_block = 8;
int workitems_per_block = (1 << log2workitems_per_block); // 256
int elements_per_work_item = elements_per_block / workitems_per_block;
int mask = ~(~0 << log2workitems_per_block);
int num_work_items = data.size() / elements_per_work_item;
int num_work_groups = num_work_items / work_group_size;
std::cout << "Num work items = " << num_work_items << std::endl;
std::cout << "Num work groups = " << num_work_groups << std::endl;
std::cout << "Elements per item = " << elements_per_work_item << std::endl;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
auto sumr = sycl::reduction(sum_buf, h, sycl::plus<>());
h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size}, sumr,
[=](sycl::nd_item<1> item, auto &sumr_arg) {
size_t glob_id = item.get_global_id(0);
size_t group_id = item.get_group(0);
size_t loc_id = item.get_local_id(0);
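// block-strided offset: consecutive work-items touch consecutive addresses, keeping global loads coalesced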
int offset = ((glob_id >> log2workitems_per_block)
<< log2elements_per_block) +
(glob_id & mask);
int sum = 0;
for (size_t i = 0; i < elements_per_work_item; i++)
sum +=
buf_acc[(i << log2workitems_per_block) + offset];
sumr_arg += sum;
});
});
sycl::host_accessor h_acc(sum_buf);
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/lab/reduction_sg.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0;
int work_group_size = 256;
int log2elements_per_work_item = 6;
int elements_per_work_item = (1 << log2elements_per_work_item); // 256
int num_work_items = data.size() / elements_per_work_item;
int num_work_groups = num_work_items / work_group_size;
std::cout << "Num work items = " << num_work_items << std::endl;
std::cout << "Num work groups = " << num_work_groups << std::endl;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
sycl::buffer<sycl::vec<int, 8>> accum_buf(num_work_groups);
auto e = q.submit([&](auto &h) {
const sycl::accessor buf_acc(buf, h);
sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
sycl::local_accessor<sycl::vec<int, 8>, 1> scratch(work_group_size, h);
h.parallel_for(
sycl::nd_range<1>{num_work_items, work_group_size}, [=
](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(16)]] {
size_t glob_id = item.get_global_id(0);
size_t group_id = item.get_group(0);
size_t loc_id = item.get_local_id(0);
sycl::sub_group sg = item.get_sub_group();
sycl::vec<int, 8> sum{0, 0, 0, 0, 0, 0, 0, 0};
using global_ptr =
sycl::multi_ptr<int, sycl::access::address_space::global_space>;
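// with sub-group size 16 and vec<int, 8>, each blocked load covers 8 * 16 = 128 consecutive ints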
int base = (group_id * work_group_size +
sg.get_group_id()[0] * sg.get_local_range()[0]) *
elements_per_work_item;
for (size_t i = 0; i < elements_per_work_item / 8; i++)
sum += sg.load<8>(global_ptr(&buf_acc[base + i * 128]));
scratch[loc_id] = sum;
for (int i = work_group_size / 2; i > 0; i >>= 1) {
sycl::group_barrier(item.get_group());
if (loc_id < i)
scratch[loc_id] += scratch[loc_id + i];
}
if (loc_id == 0)
accum_acc[group_id] = scratch[0];
});
});
q.wait();
{
sycl::host_accessor h_acc(accum_buf);
sycl::vec<int, 8> res{0, 0, 0, 0, 0, 0, 0, 0};
for (int i = 0; i < num_work_groups; i++)
res += h_acc[i];
sum = 0;
for (int i = 0; i < 8; i++)
sum += res[i];
}
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/lab/reduction_slm.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0;
int work_group_size = 256;
int log2elements_per_block = 13;
int elements_per_block = (1 << log2elements_per_block); // 8192
int log2workitems_per_block = 8;
int workitems_per_block = (1 << log2workitems_per_block); // 256
int elements_per_work_item = elements_per_block / workitems_per_block;
int mask = ~(~0 << log2workitems_per_block);
int num_work_items = data.size() / elements_per_work_item;
int num_work_groups = num_work_items / work_group_size;
std::cout << "Num work items = " << num_work_items << std::endl;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
sycl::buffer<int> accum_buf(num_work_groups);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
sycl::local_accessor<int, 1> scratch(work_group_size, h);
h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size},
[=](sycl::nd_item<1> item) {
size_t glob_id = item.get_global_id(0);
size_t group_id = item.get_group(0);
size_t loc_id = item.get_local_id(0);
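// block-strided offset: consecutive work-items touch consecutive addresses, keeping global loads coalesced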
int offset = ((glob_id >> log2workitems_per_block)
<< log2elements_per_block) +
(glob_id & mask);
int sum = 0;
for (size_t i = 0; i < elements_per_work_item; i++)
sum +=
buf_acc[(i << log2workitems_per_block) + offset];
scratch[loc_id] = sum;
// Serial Reduction
sycl::group_barrier(item.get_group());
if (loc_id == 0) {
int sum = 0;
for (int i = 0; i < work_group_size; i++)
sum += scratch[i];
accum_acc[group_id] = sum;
}
});
});
q.wait();
{
sum = 0;
sycl::host_accessor h_acc(accum_buf);
for (int i = 0; i < num_work_groups; i++)
sum += h_acc[i];
}
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/lab/reduction_sycl.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0; // the reduction combines into the buffer's initial value
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
auto sum_reduction = sycl::reduction(sum_buf, h, sycl::plus<>());
h.parallel_for(sycl::nd_range<1>{N, 256}, sum_reduction,
[=](sycl::nd_item<1> item, auto &sum_wg) {
int i = item.get_global_id(0);
sum_wg += buf_acc[i];
});
});
sycl::host_accessor h_acc(sum_buf);
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/lab/reduction_atomics.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(N, [=](auto index) {
size_t glob_id = index[0];
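      // One relaxed, device-scope atomic add per input element; simple but
      // heavily contended on the single global accumulator.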
auto v = sycl::atomic_ref<int,
sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>(sum_acc[0]);
v.fetch_add(buf_acc[glob_id]);
});
});
sycl::host_accessor h_acc(sum_buf);
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/lab/reduction-tree-vectorize.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
int num_processing_elements =
q.get_device().get_info<sycl::info::device::max_compute_units>();
int vec_size =
q.get_device().get_info<sycl::info::device::native_vector_width_int>();
int num_work_items = num_processing_elements * vec_size;
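// One work-item per (compute unit x native vector lane); each work-item sums a
// strided slice of the input and the host adds up the partial results.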
std::cout << "Num work items = " << num_work_items << std::endl;
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> accum_buf(num_work_items);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor accum_acc(accum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_work_items, [=](auto index) {
size_t glob_id = index[0];
int sum = 0;
for (size_t i = glob_id; i < N; i += num_work_items)
sum += buf_acc[i];
accum_acc[glob_id] = sum;
});
});
sycl::host_accessor h_acc(accum_buf);
for (int i = 0; i < num_work_items; i++) sum += h_acc[i];
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/09_Kernel_Reduction/lab/reduction_sycl_blocks.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = (1000 * 1024 * 1024);
int main(int argc, char *argv[]) {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
int sum = 0;
int work_group_size = 256;
int log2elements_per_block = 13;
int elements_per_block = (1 << log2elements_per_block); // 8192
int log2workitems_per_block = 8;
int workitems_per_block = (1 << log2workitems_per_block); // 256
int elements_per_work_item = elements_per_block / workitems_per_block;
int mask = ~(~0 << log2workitems_per_block);
int num_work_items = data.size() / elements_per_work_item;
int num_work_groups = num_work_items / work_group_size;
std::cout << "Num work items = " << num_work_items << std::endl;
std::cout << "Num work groups = " << num_work_groups << std::endl;
std::cout << "Elements per item = " << elements_per_work_item << std::endl;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data.size(), props);
sycl::buffer<int> sum_buf(&sum, 1, props);
auto e = q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
auto sumr = sycl::reduction(sum_buf, h, sycl::plus<>());
h.parallel_for(sycl::nd_range<1>{num_work_items, work_group_size}, sumr,
[=](sycl::nd_item<1> item, auto &sumr_arg) {
size_t glob_id = item.get_global_id(0);
size_t group_id = item.get_group(0);
size_t loc_id = item.get_local_id(0);
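                     // Blocked layout: each work-item privately sums its slice,
                     // then contributes a single value to the SYCL reduction.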
int offset = ((glob_id >> log2workitems_per_block)
<< log2elements_per_block) +
(glob_id & mask);
int sum = 0;
for (size_t i = 0; i < elements_per_work_item; i++)
sum +=
buf_acc[(i << log2workitems_per_block) + offset];
sumr_arg += sum;
});
});
sycl::host_accessor h_acc(sum_buf);
std::cout << "Sum = " << sum << "\n";
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/src/sg_mem_access_1.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstring> // for memset
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 1024 * 1024;
int *data = sycl::malloc_shared<int>(N, q);
int *data2 = sycl::malloc_shared<int>(N, q);
memset(data2, 0xFF, sizeof(int) * N);
auto e = q.submit([&](auto &h) {
h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
[=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
sycl::ext::oneapi::sub_group sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
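                       // Remap the index so that on every pass of the loop the
                       // work-items of a sub-group read consecutive ints
                       // (coalesced access).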
i = (i / sgSize) * sgSize * 16 + (i % sgSize);
for (int j = 0; j < sgSize * 16; j += sgSize) {
data[i + j] = data2[i + j];
}
});
});
q.wait();
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) << " ns\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/src/sg_mem_access_2.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstring> // for memset
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 1024 * 1024;
int *data = sycl::malloc_shared<int>(N, q);
int *data2 = sycl::malloc_shared<int>(N, q);
memset(data2, 0xFF, sizeof(int) * N);
auto e = q.submit([&](auto &h) {
h.parallel_for(
sycl::nd_range(sycl::range{N / 16}, sycl::range{32}), [=
](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
sycl::ext::oneapi::sub_group sg = it.get_sub_group();
sycl::vec<int, 8> x;
using global_ptr =
sycl::multi_ptr<int, sycl::access::address_space::global_space>;
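                // Sub-group block loads/stores: each sg.load<8>/sg.store<8>
                // moves a contiguous chunk cooperatively, one vec<int, 8> per
                // work-item.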
int base = (it.get_group(0) * 32 +
sg.get_group_id()[0] * sg.get_local_range()[0]) *
16;
x = sg.load<8>(global_ptr(&(data2[base + 0])));
sg.store<8>(global_ptr(&(data[base + 0])), x);
x = sg.load<8>(global_ptr(&(data2[base + 128])));
sg.store<8>(global_ptr(&(data[base + 128])), x);
});
});
q.wait();
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) << " ns\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/src/sg_max_size.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int N = 15;
int main() {
sycl::queue q;
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()<< "\n";
int *data = sycl::malloc_shared<int>(N + N + 2, q);
for (int i = 0; i < N + N + 2; i++) {
data[i] = i;
}
// Snippet begin
auto e = q.submit([&](auto &h) {
sycl::stream out(65536, 128, h);
h.parallel_for(
sycl::nd_range<1>(15, 15), [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
int i = it.get_global_linear_id();
auto sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
int sgMaxSize = sg.get_max_local_range()[0];
int sId = sg.get_local_id()[0];
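              // With 15 work-items and reqd_sub_group_size(16), the single
              // sub-group is partially filled: sgSize reports the active count
              // (15) while sgMaxSize stays 16.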
int j = data[i];
int k = data[i + sgSize];
out << "globalId = " << i << " sgMaxSize = " << sgMaxSize
<< " sgSize = " << sgSize << " sId = " << sId << " j = " << j
<< " k = " << k << sycl::endl;
});
});
q.wait();
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/src/sg_mem_access_0.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstring> // for memset
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 1024 * 1024;
int *data = sycl::malloc_shared<int>(N, q);
int *data2 = sycl::malloc_shared<int>(N, q);
memset(data2, 0xFF, sizeof(int) * N);
auto e = q.submit([&](auto &h) {
h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
[=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
i = i * 16;
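                       // Each work-item copies 16 consecutive ints, so
                       // neighboring work-items start 64 bytes apart: poorly
                       // coalesced access.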
for (int j = i; j < (i + 16); j++) {
data[j] = data2[j];
}
});
});
q.wait();
std::cout << "Kernel time = " << (e.template get_profiling_info< sycl::info::event_profiling::command_end>() - e.template get_profiling_info< sycl::info::event_profiling::command_start>())<< " ns\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/src/sg_size.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q;
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()<< "\n";
q.submit([&](auto &h) {
sycl::stream out(65536, 256, h);
h.parallel_for(sycl::nd_range<1>(32,32), [=](sycl::nd_item<1> it) {
int groupId = it.get_group(0);
int globalId = it.get_global_linear_id();
auto sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
int sgGroupId = sg.get_group_id()[0];
int sgId = sg.get_local_id()[0];
out << "globalId = " << sycl::setw(2) << globalId
<< " groupId = " << groupId
<< " sgGroupId = " << sgGroupId << " sgId = " << sgId
<< " sgSize = " << sycl::setw(2) << sgSize
<< sycl::endl;
});
});
q.wait();
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/src/sg_shuffle.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
constexpr size_t N = 16;
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<unsigned int> matrix(N * N);
for (int i = 0; i < N * N; ++i) {
matrix[i] = i;
}
std::cout << "Matrix: " << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << std::setw(3) << matrix[i * N + j] << " ";
}
std::cout << std::endl;
}
{
constexpr size_t blockSize = 16;
sycl::buffer<unsigned int, 2> m(matrix.data(), sycl::range<2>(N, N));
auto e = q.submit([&](auto &h) {
sycl::accessor marr(m, h);
sycl::local_accessor<unsigned int, 2> barr1(sycl::range<2>(blockSize, blockSize), h);
sycl::local_accessor<unsigned int, 2> barr2(sycl::range<2>(blockSize, blockSize), h);
h.parallel_for(
sycl::nd_range<2>(sycl::range<2>(N / blockSize, N),
sycl::range<2>(1, blockSize)),
[=](sycl::nd_item<2> it) [[intel::reqd_sub_group_size(16)]] {
int gi = it.get_group(0);
int gj = it.get_group(1);
sycl::sub_group sg = it.get_sub_group();
int sgId = sg.get_local_id()[0];
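            // bcol[k] holds row (ai + k) of the 16x16 block, one element per
            // lane; select_from_group below gathers a column from across the
            // lanes, transposing the block in registers.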
unsigned int bcol[blockSize];
int ai = blockSize * gi;
int aj = blockSize * gj;
for (int k = 0; k < blockSize; k++) {
bcol[k] = sg.load(marr.get_pointer() + (ai + k) * N + aj);
}
unsigned int tcol[blockSize];
for (int n = 0; n < blockSize; n++) {
if (sgId == n) {
for (int k = 0; k < blockSize; k++) {
tcol[k] = sycl::select_from_group(sg, bcol[n], k);
}
}
}
for (int k = 0; k < blockSize; k++) {
sg.store(marr.get_pointer() + (ai + k) * N + aj, tcol[k]);
}
});
});
q.wait();
size_t kernel_time = (e.template get_profiling_info< sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "\nKernel Execution Time: " << kernel_time * 1e-6 << " msec\n";
}
std::cout << std::endl << "Transposed Matrix: " << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << std::setw(3) << matrix[i * N + j] << " ";
}
std::cout << std::endl;
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/lab/sg_mem_access_1.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstring> // for memset
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 1024 * 1024;
int *data = sycl::malloc_shared<int>(N, q);
int *data2 = sycl::malloc_shared<int>(N, q);
memset(data2, 0xFF, sizeof(int) * N);
auto e = q.submit([&](auto &h) {
h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
[=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
sycl::ext::oneapi::sub_group sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
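                       // Remap the index so that on every pass of the loop the
                       // work-items of a sub-group read consecutive ints
                       // (coalesced access).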
i = (i / sgSize) * sgSize * 16 + (i % sgSize);
for (int j = 0; j < sgSize * 16; j += sgSize) {
data[i + j] = data2[i + j];
}
});
});
q.wait();
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) << " ns\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/lab/sg_mem_access_2.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstring> // for memset
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 1024 * 1024;
int *data = sycl::malloc_shared<int>(N, q);
int *data2 = sycl::malloc_shared<int>(N, q);
memset(data2, 0xFF, sizeof(int) * N);
auto e = q.submit([&](auto &h) {
h.parallel_for(
sycl::nd_range(sycl::range{N / 16}, sycl::range{32}), [=
](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
sycl::ext::oneapi::sub_group sg = it.get_sub_group();
sycl::vec<int, 8> x;
using global_ptr =
sycl::multi_ptr<int, sycl::access::address_space::global_space>;
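                // Sub-group block loads/stores: each sg.load<8>/sg.store<8>
                // moves a contiguous chunk cooperatively, one vec<int, 8> per
                // work-item.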
int base = (it.get_group(0) * 32 +
sg.get_group_id()[0] * sg.get_local_range()[0]) *
16;
x = sg.load<8>(global_ptr(&(data2[base + 0])));
sg.store<8>(global_ptr(&(data[base + 0])), x);
x = sg.load<8>(global_ptr(&(data2[base + 128])));
sg.store<8>(global_ptr(&(data[base + 128])), x);
});
});
q.wait();
std::cout << "Kernel time = " << (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>()) << " ns\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/lab/sg_max_size.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int N = 15;
int main() {
sycl::queue q;
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()<< "\n";
int *data = sycl::malloc_shared<int>(N + N + 2, q);
for (int i = 0; i < N + N + 2; i++) {
data[i] = i;
}
// Snippet begin
auto e = q.submit([&](auto &h) {
sycl::stream out(65536, 128, h);
h.parallel_for(
sycl::nd_range<1>(15, 15), [=](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
int i = it.get_global_linear_id();
auto sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
int sgMaxSize = sg.get_max_local_range()[0];
int sId = sg.get_local_id()[0];
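              // With 15 work-items and reqd_sub_group_size(16), the single
              // sub-group is partially filled: sgSize reports the active count
              // (15) while sgMaxSize stays 16.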
int j = data[i];
int k = data[i + sgSize];
out << "globalId = " << i << " sgMaxSize = " << sgMaxSize
<< " sgSize = " << sgSize << " sId = " << sId << " j = " << j
<< " k = " << k << sycl::endl;
});
});
q.wait();
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/lab/sg_mem_access_0.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstring> // for memset
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 1024 * 1024;
int *data = sycl::malloc_shared<int>(N, q);
int *data2 = sycl::malloc_shared<int>(N, q);
memset(data2, 0xFF, sizeof(int) * N);
auto e = q.submit([&](auto &h) {
h.parallel_for(sycl::nd_range(sycl::range{N / 16}, sycl::range{32}),
[=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
i = i * 16;
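                       // Each work-item copies 16 consecutive ints, so
                       // neighboring work-items start 64 bytes apart: poorly
                       // coalesced access.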
for (int j = i; j < (i + 16); j++) {
data[j] = data2[j];
}
});
});
q.wait();
std::cout << "Kernel time = " << (e.template get_profiling_info< sycl::info::event_profiling::command_end>() - e.template get_profiling_info< sycl::info::event_profiling::command_start>())<< " ns\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/lab/sg_size.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q;
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()<< "\n";
q.submit([&](auto &h) {
sycl::stream out(65536, 256, h);
h.parallel_for(sycl::nd_range<1>(32,32), [=](sycl::nd_item<1> it) {
int groupId = it.get_group(0);
int globalId = it.get_global_linear_id();
auto sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
int sgGroupId = sg.get_group_id()[0];
int sgId = sg.get_local_id()[0];
out << "globalId = " << sycl::setw(2) << globalId
<< " groupId = " << groupId
<< " sgGroupId = " << sgGroupId << " sgId = " << sgId
<< " sgSize = " << sycl::setw(2) << sgSize
<< sycl::endl;
});
});
q.wait();
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/07_Sub_Groups/lab/sg_shuffle.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <iomanip>
constexpr size_t N = 16;
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<unsigned int> matrix(N * N);
for (int i = 0; i < N * N; ++i) {
matrix[i] = i;
}
std::cout << "Matrix: " << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << std::setw(3) << matrix[i * N + j] << " ";
}
std::cout << std::endl;
}
{
constexpr size_t blockSize = 16;
sycl::buffer<unsigned int, 2> m(matrix.data(), sycl::range<2>(N, N));
auto e = q.submit([&](auto &h) {
sycl::accessor marr(m, h);
sycl::local_accessor<unsigned int, 2> barr1(sycl::range<2>(blockSize, blockSize), h);
sycl::local_accessor<unsigned int, 2> barr2(sycl::range<2>(blockSize, blockSize), h);
h.parallel_for(
sycl::nd_range<2>(sycl::range<2>(N / blockSize, N),
sycl::range<2>(1, blockSize)),
[=](sycl::nd_item<2> it) [[intel::reqd_sub_group_size(16)]] {
int gi = it.get_group(0);
int gj = it.get_group(1);
sycl::sub_group sg = it.get_sub_group();
int sgId = sg.get_local_id()[0];
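            // bcol[k] holds row (ai + k) of the 16x16 block, one element per
            // lane; select_from_group below gathers a column from across the
            // lanes, transposing the block in registers.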
unsigned int bcol[blockSize];
int ai = blockSize * gi;
int aj = blockSize * gj;
for (int k = 0; k < blockSize; k++) {
bcol[k] = sg.load(marr.get_pointer() + (ai + k) * N + aj);
}
unsigned int tcol[blockSize];
for (int n = 0; n < blockSize; n++) {
if (sgId == n) {
for (int k = 0; k < blockSize; k++) {
tcol[k] = sycl::select_from_group(sg, bcol[n], k);
}
}
}
for (int k = 0; k < blockSize; k++) {
sg.store(marr.get_pointer() + (ai + k) * N + aj, tcol[k]);
}
});
});
q.wait();
size_t kernel_time = (e.template get_profiling_info< sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "\nKernel Execution Time: " << kernel_time * 1e-6 << " msec\n";
}
std::cout << std::endl << "Transposed Matrix: " << std::endl;
for (int i = 0; i < N; i++) {
for (int j = 0; j < N; j++) {
std::cout << std::setw(3) << matrix[i * N + j] << " ";
}
std::cout << std::endl;
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/src/histogram_256_int.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdlib> // for srand, rand
int main() {
constexpr int N = 4096 * 4096;
std::vector<unsigned long> input(N);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = (long)rand() % 256;
input[i] |= ((long)rand() % 256) << 8;
input[i] |= ((long)rand() % 256) << 16;
input[i] |= ((long)rand() % 256) << 24;
input[i] |= ((long)rand() % 256) << 32;
input[i] |= ((long)rand() % 256) << 40;
input[i] |= ((long)rand() % 256) << 48;
input[i] |= ((long)rand() % 256) << 56;
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int blockSize = 256;
constexpr int NUM_BINS = 256;
std::vector<unsigned long> hist(NUM_BINS, 0);
sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
auto e = q.submit([&](auto &h) {
sycl::accessor macc(mbuf, h, sycl::read_only);
auto hacc = hbuf.get_access<sycl::access::mode::atomic>(h);
h.parallel_for(
sycl::nd_range(sycl::range{N / blockSize}, sycl::range{64}), [=
](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
sycl::sub_group sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
int sgGroup = sg.get_group_id()[0];
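              // Private per-work-item histogram; merged into the global bins
              // with one atomic fetch_add per bin at the end.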
unsigned int histogram[NUM_BINS];
for (int k = 0; k < NUM_BINS; k++) {
histogram[k] = 0;
}
for (int k = 0; k < blockSize; k++) {
unsigned long x =
sg.load(macc.get_pointer() + group * gSize * blockSize +
sgGroup * sgSize * blockSize + sgSize * k);
#pragma unroll
for (int i = 0; i < 8; i++) {
                unsigned int c = x & 0xFFU; // low 8 bits select one of 256 bins
histogram[c] += 1;
x = x >> 8;
}
}
for (int k = 0; k < NUM_BINS; k++) {
hacc[k].fetch_add(histogram[k]);
}
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec" << std::endl;
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/src/convolution_global.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdlib> // for srand, rand
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
std::vector<int> input(N);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = rand();
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
sycl::buffer<int> ibuf(input.data(), N);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
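        // The first and last work-groups clamp the convolution window to valid
        // input; all interior groups take the unconditional fast path.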
int t = 0;
if ((group == 0) || (group == N / gSize - 1)) {
if (i < M / 2) {
for (int j = M / 2 - i, k = 0; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
} else {
if (i + M / 2 >= N) {
for (int j = 0, k = i - M / 2; j < M / 2 + N - i;
j++, k++) {
t += iacc[k] * kacc[j];
}
} else {
for (int j = 0, k = i - M / 2; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
}
}
} else {
for (int j = 0, k = i - M / 2; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/src/slm_bank.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 32;
auto data = sycl::malloc_shared<int>(N, q);
auto e = q.submit([&](auto &h) {
sycl::local_accessor<int, 1> slm(sycl::range(32 * 64), h);
h.parallel_for(sycl::nd_range(sycl::range{N}, sycl::range{32}), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int j = it.get_local_linear_id();
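      // A stride of 16 ints (64 bytes) maps the work-items' SLM slots onto a
      // small set of banks, so the repeated updates below serialize on bank
      // conflicts.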
slm[j * 16] = 0;
sycl::group_barrier(it.get_group());
for (int m = 0; m < 1024 * 1024; m++) {
slm[j * 16] += i * m;
sycl::group_barrier(it.get_group());
}
data[i] = slm[j * 16];
});
});
q.wait();
std::cout << "Kernel time = "
<< (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>())
<< " ns\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/src/histogram_256_slm.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdint> // for std::uint8_t
#include <cstdlib> // for srand, rand
int main() {
constexpr int N = 4096 * 4096;
std::vector<unsigned long> input(N);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = (long)rand() % 256;
input[i] |= ((long)rand() % 256) << 8;
input[i] |= ((long)rand() % 256) << 16;
input[i] |= ((long)rand() % 256) << 24;
input[i] |= ((long)rand() % 256) << 32;
input[i] |= ((long)rand() % 256) << 40;
input[i] |= ((long)rand() % 256) << 48;
input[i] |= ((long)rand() % 256) << 56;
}
sycl::queue q{sycl::gpu_selector_v,
sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
<< "\n";
// Snippet begin
constexpr int NUM_BINS = 256;
constexpr int blockSize = 256;
std::vector<unsigned long> hist(NUM_BINS, 0);
sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
auto e = q.submit([&](auto &h) {
sycl::accessor macc(mbuf, h, sycl::read_only);
sycl::accessor hacc(hbuf, h, sycl::read_write);
sycl::local_accessor<unsigned int> local_histogram(sycl::range(NUM_BINS),
h);
h.parallel_for(
sycl::nd_range(sycl::range{N / blockSize}, sycl::range{64}),
[=](sycl::nd_item<1> it) {
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
sycl::sub_group sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
int sgGroup = sg.get_group_id()[0];
int factor = NUM_BINS / gSize;
int local_id = it.get_local_id()[0];
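                // Cooperatively zero the shared-local histogram: each work-item
                // clears one bin, or `factor` bins when NUM_BINS exceeds the
                // work-group size.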
if ((factor <= 1) && (local_id < NUM_BINS)) {
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[local_id]);
local_bin.store(0);
} else {
for (int k = 0; k < factor; k++) {
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[gSize * k + local_id]);
local_bin.store(0);
}
}
sycl::group_barrier(it.get_group());
for (int k = 0; k < blockSize; k++) {
unsigned long x =
sg.load(macc.get_pointer() + group * gSize * blockSize +
sgGroup * sgSize * blockSize + sgSize * k);
#pragma unroll
for (std::uint8_t shift : {0, 8, 16, 24, 32, 40, 48, 56}) {
constexpr unsigned long mask = 0xFFU;
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[(x >> shift) & mask]);
local_bin += 1;
}
}
sycl::group_barrier(it.get_group());
if ((factor <= 1) && (local_id < NUM_BINS)) {
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[local_id]);
sycl::atomic_ref<unsigned long, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>
global_bin(hacc[local_id]);
global_bin += local_bin.load();
} else {
for (int k = 0; k < factor; k++) {
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[gSize * k + local_id]);
sycl::atomic_ref<unsigned long, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>
global_bin(hacc[gSize * k + local_id]);
global_bin += local_bin.load();
}
}
});
});
// Snippet end
q.wait();
size_t kernel_ns = (e.template get_profiling_info<
sycl::info::event_profiling::command_end>() -
e.template get_profiling_info<
sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
<< " msec" << std::endl;
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/src/convolution_slm.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdlib> // for srand, rand
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
std::vector<int> input(N);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = rand();
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
sycl::buffer<int> ibuf(input.data(), N);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
sycl::local_accessor<int, 1> ciacc(sycl::range(256 + (M / 2) * 2), h);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
int local_id = it.get_local_id()[0];
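          // Stage this work-group's tile plus an M/2 halo on each side into
          // shared local memory; the boundary work-items fill the halo and the
          // outermost groups zero-pad past the array ends.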
ciacc[local_id + M / 2] = iacc[i];
if (local_id == 0) {
if (group == 0) {
for (int j = 0; j < M / 2; j++) {
ciacc[j] = 0;
}
} else {
for (int j = 0, k = i - M / 2; j < M / 2; j++, k++) {
ciacc[j] = iacc[k];
}
}
}
if (local_id == gSize - 1) {
if (group == it.get_group_range()[0] - 1) {
for (int j = gSize + M / 2;
j < gSize + M / 2 + M / 2; j++) {
ciacc[j] = 0;
}
} else {
for (int j = gSize + M / 2, k = i + 1;
j < gSize + M / 2 + M / 2; j++, k++) {
ciacc[j] = iacc[k];
}
}
}
sycl::group_barrier(it.get_group());
int t = 0;
for (int j = 0, k = local_id; j < M; j++, k++) {
t += ciacc[k] * kacc[j];
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/lab/histogram_256_int.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdlib> // for srand, rand
int main() {
constexpr int N = 4096 * 4096;
std::vector<unsigned long> input(N);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = (long)rand() % 256;
input[i] |= ((long)rand() % 256) << 8;
input[i] |= ((long)rand() % 256) << 16;
input[i] |= ((long)rand() % 256) << 24;
input[i] |= ((long)rand() % 256) << 32;
input[i] |= ((long)rand() % 256) << 40;
input[i] |= ((long)rand() % 256) << 48;
input[i] |= ((long)rand() % 256) << 56;
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int blockSize = 256;
constexpr int NUM_BINS = 256;
std::vector<unsigned long> hist(NUM_BINS, 0);
sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
auto e = q.submit([&](auto &h) {
sycl::accessor macc(mbuf, h, sycl::read_only);
auto hacc = hbuf.get_access<sycl::access::mode::atomic>(h);
h.parallel_for(
sycl::nd_range(sycl::range{N / blockSize}, sycl::range{64}), [=
](sycl::nd_item<1> it) [[intel::reqd_sub_group_size(16)]] {
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
sycl::sub_group sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
int sgGroup = sg.get_group_id()[0];
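              // Private per-work-item histogram; merged into the global bins
              // with one atomic fetch_add per bin at the end.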
unsigned int histogram[NUM_BINS];
for (int k = 0; k < NUM_BINS; k++) {
histogram[k] = 0;
}
for (int k = 0; k < blockSize; k++) {
unsigned long x =
sg.load(macc.get_pointer() + group * gSize * blockSize +
sgGroup * sgSize * blockSize + sgSize * k);
#pragma unroll
for (int i = 0; i < 8; i++) {
                unsigned int c = x & 0xFFU; // low 8 bits select one of 256 bins
histogram[c] += 1;
x = x >> 8;
}
}
for (int k = 0; k < NUM_BINS; k++) {
hacc[k].fetch_add(histogram[k]);
}
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec" << std::endl;
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/lab/convolution_global.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdlib> // for srand, rand
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
std::vector<int> input(N);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = rand();
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
sycl::buffer<int> ibuf(input.data(), N);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
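        // The first and last work-groups clamp the convolution window to valid
        // input; all interior groups take the unconditional fast path.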
int t = 0;
if ((group == 0) || (group == N / gSize - 1)) {
if (i < M / 2) {
for (int j = M / 2 - i, k = 0; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
} else {
if (i + M / 2 >= N) {
for (int j = 0, k = i - M / 2; j < M / 2 + N - i;
j++, k++) {
t += iacc[k] * kacc[j];
}
} else {
for (int j = 0, k = i - M / 2; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
}
}
} else {
for (int j = 0, k = i - M / 2; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/lab/slm_bank.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 32;
auto data = sycl::malloc_shared<int>(N, q);
auto e = q.submit([&](auto &h) {
sycl::local_accessor<int, 1> slm(sycl::range(32 * 64), h);
h.parallel_for(sycl::nd_range(sycl::range{N}, sycl::range{32}), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int j = it.get_local_linear_id();
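      // A stride of 16 ints (64 bytes) maps the work-items' SLM slots onto a
      // small set of banks, so the repeated updates below serialize on bank
      // conflicts.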
slm[j * 16] = 0;
sycl::group_barrier(it.get_group());
for (int m = 0; m < 1024 * 1024; m++) {
slm[j * 16] += i * m;
sycl::group_barrier(it.get_group());
}
data[i] = slm[j * 16];
});
});
q.wait();
std::cout << "Kernel time = "
<< (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>())
<< " ns\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/lab/histogram_256_slm.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdint> // for std::uint8_t
#include <cstdlib> // for srand, rand
int main() {
constexpr int N = 4096 * 4096;
std::vector<unsigned long> input(N);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = (long)rand() % 256;
input[i] |= ((long)rand() % 256) << 8;
input[i] |= ((long)rand() % 256) << 16;
input[i] |= ((long)rand() % 256) << 24;
input[i] |= ((long)rand() % 256) << 32;
input[i] |= ((long)rand() % 256) << 40;
input[i] |= ((long)rand() % 256) << 48;
input[i] |= ((long)rand() % 256) << 56;
}
sycl::queue q{sycl::gpu_selector_v,
sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>()
<< "\n";
// Snippet begin
constexpr int NUM_BINS = 256;
constexpr int blockSize = 256;
std::vector<unsigned long> hist(NUM_BINS, 0);
sycl::buffer<unsigned long, 1> mbuf(input.data(), N);
sycl::buffer<unsigned long, 1> hbuf(hist.data(), NUM_BINS);
auto e = q.submit([&](auto &h) {
sycl::accessor macc(mbuf, h, sycl::read_only);
sycl::accessor hacc(hbuf, h, sycl::read_write);
sycl::local_accessor<unsigned int> local_histogram(sycl::range(NUM_BINS),
h);
h.parallel_for(
sycl::nd_range(sycl::range{N / blockSize}, sycl::range{64}),
[=](sycl::nd_item<1> it) {
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
sycl::sub_group sg = it.get_sub_group();
int sgSize = sg.get_local_range()[0];
int sgGroup = sg.get_group_id()[0];
int factor = NUM_BINS / gSize;
int local_id = it.get_local_id()[0];
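                // Cooperatively zero the shared-local histogram: each work-item
                // clears one bin, or `factor` bins when NUM_BINS exceeds the
                // work-group size.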
if ((factor <= 1) && (local_id < NUM_BINS)) {
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[local_id]);
local_bin.store(0);
} else {
for (int k = 0; k < factor; k++) {
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[gSize * k + local_id]);
local_bin.store(0);
}
}
sycl::group_barrier(it.get_group());
for (int k = 0; k < blockSize; k++) {
unsigned long x =
sg.load(macc.get_pointer() + group * gSize * blockSize +
sgGroup * sgSize * blockSize + sgSize * k);
#pragma unroll
for (std::uint8_t shift : {0, 8, 16, 24, 32, 40, 48, 56}) {
constexpr unsigned long mask = 0xFFU;
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[(x >> shift) & mask]);
local_bin += 1;
}
}
sycl::group_barrier(it.get_group());
if ((factor <= 1) && (local_id < NUM_BINS)) {
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[local_id]);
sycl::atomic_ref<unsigned long, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>
global_bin(hacc[local_id]);
global_bin += local_bin.load();
} else {
for (int k = 0; k < factor; k++) {
sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::local_space>
local_bin(local_histogram[gSize * k + local_id]);
sycl::atomic_ref<unsigned long, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>
global_bin(hacc[gSize * k + local_id]);
global_bin += local_bin.load();
}
}
});
});
// Snippet end
q.wait();
size_t kernel_ns = (e.template get_profiling_info<
sycl::info::event_profiling::command_end>() -
e.template get_profiling_info<
sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6
<< " msec" << std::endl;
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/06_Shared_Local_Memory/lab/convolution_slm.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdlib> // for srand, rand
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
std::vector<int> input(N);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = rand();
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
sycl::buffer<int> ibuf(input.data(), N);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
sycl::local_accessor<int, 1> ciacc(sycl::range(256 + (M / 2) * 2), h);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
int local_id = it.get_local_id()[0];
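          // Stage this work-group's tile plus an M/2 halo on each side into
          // shared local memory; the boundary work-items fill the halo and the
          // outermost groups zero-pad past the array ends.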
ciacc[local_id + M / 2] = iacc[i];
if (local_id == 0) {
if (group == 0) {
for (int j = 0; j < M / 2; j++) {
ciacc[j] = 0;
}
} else {
for (int j = 0, k = i - M / 2; j < M / 2; j++, k++) {
ciacc[j] = iacc[k];
}
}
}
if (local_id == gSize - 1) {
if (group == it.get_group_range()[0] - 1) {
for (int j = gSize + M / 2;
j < gSize + M / 2 + M / 2; j++) {
ciacc[j] = 0;
}
} else {
for (int j = gSize + M / 2, k = i + 1;
j < gSize + M / 2 + M / 2; j++, k++) {
ciacc[j] = iacc[k];
}
}
}
sycl::group_barrier(it.get_group());
int t = 0;
for (int j = 0, k = local_id; j < M; j++, k++) {
t += ciacc[k] * kacc[j];
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/src/kernel_launch.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono> // for std::chrono timing
constexpr int N = 1024000000;
int main() {
sycl::queue q;
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
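// parallel_for returns as soon as the kernel is submitted; the host only
// blocks at q.wait(), so the two timestamps separate submission cost from
// submission-plus-execution cost.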
q.parallel_for(N, [=](auto id) {
/* NOP */
});
auto k_subm = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
q.wait();
auto k_exec = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Kernel Submission Time: " << k_subm / 1e+9 << " seconds\n";
std::cout << "Kernel Submission + Execution Time: " << k_exec / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/src/kernel_profiling.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono> // for std::chrono timing
constexpr int N = 1024000000;
int main() {
sycl::queue q{sycl::property::queue::enable_profiling()};
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
auto e = q.parallel_for(N, [=](auto id) {
/* NOP */
});
e.wait();
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Kernel Duration : " << duration / 1e+9 << " seconds\n";
auto startK = e.get_profiling_info<sycl::info::event_profiling::command_start>();
auto endK = e.get_profiling_info<sycl::info::event_profiling::command_end>();
std::cout << "Kernel Execturion: " << (endK - startK) / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/src/kernel_multiple_queues.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono> // for std::chrono timing
constexpr int N = 1024;
#define iter 1000
int VectorAdd(sycl::queue &q1, sycl::queue &q2, sycl::queue &q3,
std::vector<int> a, std::vector<int> b) {
sycl::buffer a_buf(a);
sycl::buffer b_buf(b);
sycl::buffer<int> *sum_buf[3 * iter];
for (size_t i = 0; i < (3 * iter); i++)
sum_buf[i] = new sycl::buffer<int>(256);
size_t num_groups = 1;
size_t wg_size = 256;
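  // Each iteration submits three independent kernels to q1, q2 and q3; whether
  // they overlap depends on the queue ordering and context sharing that main()
  // varies below.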
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < iter; i++) {
q1.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
auto sum_acc = sum_buf[3 * i]->get_access<sycl::access::mode::write>(h);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < N; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
q2.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
auto sum_acc =
sum_buf[3 * i + 1]->get_access<sycl::access::mode::write>(h);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < N; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
q3.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
auto sum_acc =
sum_buf[3 * i + 2]->get_access<sycl::access::mode::write>(h);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < N; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
}
q1.wait();
q2.wait();
q3.wait();
auto end = std::chrono::steady_clock::now();
std::cout << "Vector add completed on device - took " << (end - start).count() / 1e+9 << " seconds\n";
// check results
for (size_t i = 0; i < (3 * iter); i++)
delete sum_buf[i];
return ((end - start).count());
}
int main() {
sycl::queue q(sycl::default_selector_v);
std::vector<int> a(N, 1);
std::vector<int> b(N, 2);
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
// jit the code
VectorAdd(q, q, q, a, b);
std::cout << "\nSubmission to same queue out_of_order\n";
VectorAdd(q, q, q, a, b);
sycl::queue q0(sycl::default_selector_v, sycl::property::queue::in_order());
std::cout << "\nSubmission to same queue in_order\n";
VectorAdd(q0, q0, q0, a, b);
std::cout << "\nSubmission to different queues with same context\n";
sycl::queue q1(sycl::default_selector_v);
sycl::queue q2(q1.get_context(), sycl::default_selector_v);
sycl::queue q3(q1.get_context(), sycl::default_selector_v);
VectorAdd(q1, q2, q3, a, b);
std::cout << "\nSubmission to different queues with different contexts\n";
sycl::queue q4(sycl::default_selector_v);
sycl::queue q5(sycl::default_selector_v);
sycl::queue q6(sycl::default_selector_v);
VectorAdd(q4, q5, q6, a, b);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/src/kernel_redundant_queue.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono> // for std::chrono timing
constexpr int N = 1024000;
constexpr int ITER = 1000;
int main() {
std::vector<int> data(N);
sycl::buffer<int> data_buf(data);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# kernel to initialize data
sycl::queue q1;
q1.submit([&](auto &h) {
sycl::accessor data_acc(data_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(N, [=](auto i) { data_acc[i] = i; });
}).wait();
//# for-loop with kernel computation
for (int i = 0; i < ITER; i++) {
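    // Constructing a fresh queue every iteration is the redundant work this
    // sample demonstrates; reusing one queue outside the loop avoids the
    // per-iteration setup cost.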
sycl::queue q2;
q2.submit([&](auto &h) {
sycl::accessor data_acc(data_buf, h);
h.parallel_for(N, [=](auto i) {
data_acc[i] += 1;
});
});
sycl::host_accessor ha(data_buf);
}
std::cout << "data[0] = " << data[0] << "\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/src/kernel_multiple.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <array>  // for std::array
#include <chrono> // for std::chrono timing
// Array type and data size for this example.
constexpr size_t array_size = (1 << 15);
typedef std::array<int, array_size> IntArray;
#define iter 10
int multi_queue(sycl::queue &q, const IntArray &a, const IntArray &b) {
size_t num_items = a.size();
IntArray s1, s2, s3;
sycl::buffer a_buf(a);
sycl::buffer b_buf(b);
sycl::buffer sum_buf1(s1);
sycl::buffer sum_buf2(s2);
sycl::buffer sum_buf3(s3);
size_t num_groups = 1;
size_t wg_size = 256;
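  // Three independent kernels are submitted per iteration; an out-of-order
  // queue may overlap them, while an in-order queue serializes them.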
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < iter; i++) {
q.submit([&](sycl::handler &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf1, h, sycl::write_only, sycl::no_init);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < array_size; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
q.submit([&](sycl::handler &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf2, h, sycl::write_only, sycl::no_init);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < array_size; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
q.submit([&](sycl::handler &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf3, h, sycl::write_only, sycl::no_init);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < array_size; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
}
q.wait();
auto end = std::chrono::steady_clock::now();
std::cout << "multi_queue completed on device - took "
<< (end - start).count()/ 1e+9 << " seconds\n";
// check results
return ((end - start).count());
} // end multi_queue
void InitializeArray(IntArray &a) {
for (size_t i = 0; i < a.size(); i++)
a[i] = 1;
}
IntArray a, b;
int main() {
sycl::queue q;
InitializeArray(a);
InitializeArray(b);
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
// begin in-order submission
std::cout << "In order queue: Jitting+Execution time\n";
sycl::queue q1{sycl::property::queue::in_order()};
multi_queue(q1, a, b);
std::cout << "In order queue: Execution time\n";
multi_queue(q1, a, b);
// end in-order submission
// begin out-of-order submission
sycl::queue q2;
std::cout << "Out of order queue: Jitting+Execution time\n";
multi_queue(q2, a, b);
std::cout << "Out of order queue: Execution time\n";
multi_queue(q2, a, b);
// end out-of-order submission
return 0;
}
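//# Illustrative sketch (assumption, not from the sample): when kernels use USM
//# instead of accessors, an out-of-order queue gives them no implicit ordering;
//# the dependency can be stated explicitly through the returned event rather
//# than by switching the whole queue to in_order:
void chained_usm_submissions(sycl::queue &q, int *data, size_t n) {
  sycl::event e = q.parallel_for(sycl::range<1>(n), [=](auto i) { data[i] = 1; });
  //# second kernel waits on the first via the event dependency
  q.parallel_for(sycl::range<1>(n), e, [=](auto i) { data[i] += 1; }).wait();
}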
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/lab/kernel_launch.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int N = 1024000000;
int main() {
sycl::queue q;
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.parallel_for(N, [=](auto id) {
/* NOP */
});
auto k_subm = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
q.wait();
auto k_exec = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Kernel Submission Time: " << k_subm / 1e+9 << " seconds\n";
std::cout << "Kernel Submission + Execution Time: " << k_exec / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/lab/kernel_profiling.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int N = 1024000000;
int main() {
sycl::queue q{sycl::property::queue::enable_profiling()};
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
auto e = q.parallel_for(N, [=](auto id) {
/* NOP */
});
e.wait();
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Kernel Duration : " << duration / 1e+9 << " seconds\n";
auto startK = e.get_profiling_info<sycl::info::event_profiling::command_start>();
auto endK = e.get_profiling_info<sycl::info::event_profiling::command_end>();
std::cout << "Kernel Execturion: " << (endK - startK) / 1e+9 << " seconds\n";
return 0;
}
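//# Illustrative sketch (assumption): SYCL 2020 profiling also exposes
//# command_submit, so the time a kernel spent queued can be separated from the
//# time it spent executing:
void print_profiling_breakdown(sycl::event &e) {
  auto submit = e.get_profiling_info<sycl::info::event_profiling::command_submit>();
  auto startK = e.get_profiling_info<sycl::info::event_profiling::command_start>();
  auto endK   = e.get_profiling_info<sycl::info::event_profiling::command_end>();
  std::cout << "Queued   : " << (startK - submit) / 1e+9 << " seconds\n";
  std::cout << "Executed : " << (endK - startK) / 1e+9 << " seconds\n";
}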
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/lab/kernel_multiple_queues.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int N = 1024;
#define iter 1000
int VectorAdd(sycl::queue &q1, sycl::queue &q2, sycl::queue &q3,
std::vector<int> a, std::vector<int> b) {
sycl::buffer a_buf(a);
sycl::buffer b_buf(b);
sycl::buffer<int> *sum_buf[3 * iter];
for (size_t i = 0; i < (3 * iter); i++)
sum_buf[i] = new sycl::buffer<int>(256);
size_t num_groups = 1;
size_t wg_size = 256;
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < iter; i++) {
q1.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
auto sum_acc = sum_buf[3 * i]->get_access<sycl::access::mode::write>(h);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < N; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
q2.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
auto sum_acc =
sum_buf[3 * i + 1]->get_access<sycl::access::mode::write>(h);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < N; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
q3.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
auto sum_acc =
sum_buf[3 * i + 2]->get_access<sycl::access::mode::write>(h);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < N; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
}
q1.wait();
q2.wait();
q3.wait();
auto end = std::chrono::steady_clock::now();
std::cout << "Vector add completed on device - took " << (end - start).count() / 1e+9 << " seconds\n";
// check results
for (size_t i = 0; i < (3 * iter); i++)
delete sum_buf[i];
return ((end - start).count());
}
int main() {
sycl::queue q(sycl::default_selector_v);
std::vector<int> a(N, 1);
std::vector<int> b(N, 2);
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
// jit the code
VectorAdd(q, q, q, a, b);
std::cout << "\nSubmission to same queue out_of_order\n";
VectorAdd(q, q, q, a, b);
sycl::queue q0(sycl::default_selector_v, sycl::property::queue::in_order());
std::cout << "\nSubmission to same queue in_order\n";
VectorAdd(q0, q0, q0, a, b);
std::cout << "\nSubmission to different queues with same context\n";
sycl::queue q1(sycl::default_selector_v);
sycl::queue q2(q1.get_context(), sycl::default_selector_v);
sycl::queue q3(q1.get_context(), sycl::default_selector_v);
VectorAdd(q1, q2, q3, a, b);
std::cout << "\nSubmission to different queues with different contexts\n";
sycl::queue q4(sycl::default_selector_v);
sycl::queue q5(sycl::default_selector_v);
sycl::queue q6(sycl::default_selector_v);
VectorAdd(q4, q5, q6, a, b);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/lab/kernel_redundant_queue.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr int N = 1024000;
constexpr int ITER = 1000;
int main() {
std::vector<int> data(N);
sycl::buffer<int> data_buf(data);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# kernel to initialize data
sycl::queue q1;
q1.submit([&](auto &h) {
sycl::accessor data_acc(data_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(N, [=](auto i) { data_acc[i] = i; });
}).wait();
//# for-loop with kernel computation
for (int i = 0; i < ITER; i++) {
sycl::queue q2;
q2.submit([&](auto &h) {
sycl::accessor data_acc(data_buf, h);
h.parallel_for(N, [=](auto i) {
data_acc[i] += 1;
});
});
sycl::host_accessor ha(data_buf);
}
std::cout << "data[0] = " << data[0] << "\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/04_Kernel_Submission/lab/kernel_multiple.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
// Array type and data size for this example.
constexpr size_t array_size = (1 << 15);
typedef std::array<int, array_size> IntArray;
#define iter 10
int multi_queue(sycl::queue &q, const IntArray &a, const IntArray &b) {
size_t num_items = a.size();
IntArray s1, s2, s3;
sycl::buffer a_buf(a);
sycl::buffer b_buf(b);
sycl::buffer sum_buf1(s1);
sycl::buffer sum_buf2(s2);
sycl::buffer sum_buf3(s3);
size_t num_groups = 1;
size_t wg_size = 256;
auto start = std::chrono::steady_clock::now();
for (int i = 0; i < iter; i++) {
q.submit([&](sycl::handler &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf1, h, sycl::write_only, sycl::no_init);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < array_size; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
q.submit([&](sycl::handler &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf2, h, sycl::write_only, sycl::no_init);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < array_size; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
q.submit([&](sycl::handler &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf3, h, sycl::write_only, sycl::no_init);
h.parallel_for(sycl::nd_range<1>(num_groups * wg_size, wg_size),
[=](sycl::nd_item<1> index) {
size_t loc_id = index.get_local_id();
sum_acc[loc_id] = 0;
for (int j = 0; j < 1000; j++)
for (size_t i = loc_id; i < array_size; i += wg_size) {
sum_acc[loc_id] += a_acc[i] + b_acc[i];
}
});
});
}
q.wait();
auto end = std::chrono::steady_clock::now();
std::cout << "multi_queue completed on device - took "
<< (end - start).count()/ 1e+9 << " seconds\n";
// check results
return ((end - start).count());
} // end multi_queue
void InitializeArray(IntArray &a) {
for (size_t i = 0; i < a.size(); i++)
a[i] = 1;
}
IntArray a, b;
int main() {
sycl::queue q;
InitializeArray(a);
InitializeArray(b);
std::cout << "Running on device: "
<< q.get_device().get_info<sycl::info::device::name>() << "\n";
std::cout << "Vector size: " << a.size() << "\n";
// begin in-order submission
std::cout << "In order queue: Jitting+Execution time\n";
sycl::queue q1{sycl::property::queue::in_order()};
multi_queue(q1, a, b);
std::cout << "In order queue: Execution time\n";
multi_queue(q1, a, b);
// end in-order submission
// begin out-of-order submission
sycl::queue q2;
std::cout << "Out of order queue: Jitting+Execution time\n";
multi_queue(q2, a, b);
std::cout << "Out of order queue: Execution time\n";
multi_queue(q2, a, b);
// end out-of-order submission
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/08_Atomic_Operations/src/atomics_global.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int N = 1024 * 1000 * 1000;
constexpr int M = 256;
int sum = 0;
int *data = static_cast<int *>(malloc(sizeof(int) * N));
for (int i = 0; i < N; i++) data[i] = 1;
sycl::queue q({sycl::property::queue::enable_profiling()});
sycl::buffer<int> buf_sum(&sum, 1);
sycl::buffer<int> buf_data(data, N);
auto e = q.submit([&](sycl::handler &h) {
sycl::accessor acc_sum(buf_sum, h);
sycl::accessor acc_data(buf_data, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, M), [=](auto it) {
auto i = it.get_global_id();
sycl::atomic_ref<int, sycl::memory_order_relaxed,
sycl::memory_scope_device, sycl::access::address_space::global_space>
atomic_op(acc_sum[0]);
atomic_op += acc_data[i];
});
});
sycl::host_accessor h_a(buf_sum);
std::cout << "Reduction Sum : " << sum << "\n";
auto total_time = (e.get_profiling_info<sycl::info::event_profiling::command_end>() - e.get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9;
std::cout << "Kernel Execution Time of Global Atomics : " << total_time << "seconds\n";
return 0;
}
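//# Illustrative sketch (not the sample's method): the same sum can be expressed
//# with the SYCL 2020 reduction interface, which lets the runtime choose a
//# strategy instead of issuing one global atomic per work-item:
void sum_with_reduction(sycl::queue &q, sycl::buffer<int> &buf_data,
                        sycl::buffer<int> &buf_sum, size_t n, size_t wg) {
  q.submit([&](sycl::handler &h) {
    sycl::accessor acc_data(buf_data, h, sycl::read_only);
    auto red = sycl::reduction(buf_sum, h, sycl::plus<>());
    h.parallel_for(sycl::nd_range<1>(n, wg), red,
                   [=](sycl::nd_item<1> it, auto &sum) {
                     sum += acc_data[it.get_global_id(0)];
                   });
  });
}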
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/08_Atomic_Operations/src/atomics_data_type.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = 1024 * 100;
int reductionInt(sycl::queue &q, std::vector<int> &data) {
const size_t data_size = data.size();
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data_size, props);
sycl::buffer<int> sum_buf(&sum, 1, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(data_size, [=](auto index) {
size_t glob_id = index[0];
auto v = sycl::atomic_ref<
int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>(sum_acc[0]);
v.fetch_add(buf_acc[glob_id]);
});
});
q.wait();
sycl::host_accessor h_acc(sum_buf);
sum = h_acc[0];
std::cout << "ReductionInt Sum = " << sum << ", Duration " << (std::chrono::high_resolution_clock::now().time_since_epoch().count() - start) * 1e-9 << " seconds\n";
return sum;
}
float reductionFloat(sycl::queue &q, std::vector<float> &data) {
const size_t data_size = data.size();
float sum = 0.0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<float> buf(data.data(), data_size, props);
sycl::buffer<float> sum_buf(&sum, 1, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(data_size, [=](auto index) {
size_t glob_id = index[0];
auto v = sycl::atomic_ref<
float, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>(sum_acc[0]);
v.fetch_add(buf_acc[glob_id]);
});
});
q.wait();
sycl::host_accessor h_acc(sum_buf);
sum = h_acc[0];
std::cout << "ReductionFloat Sum = " << sum << ", Duration " << (std::chrono::high_resolution_clock::now().time_since_epoch().count() - start) * 1e-9 << " seconds\n";
return sum;
}
int main(int argc, char *argv[]) {
sycl::queue q;
std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
std::vector<int> data(N, 1);
reductionInt(q, data);
}
{
std::vector<float> data(N, 1.0f);
reductionFloat(q, data);
}
}
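//# Illustrative sketch (assumption): reductionInt and reductionFloat differ
//# only in the element type, so a single template collapses the duplication.
//# Note that atomic fetch_add on floating-point types may be emulated on
//# devices without native support, which is what the timing above exposes:
template <typename T>
T reduction_atomic(sycl::queue &q, std::vector<T> &data) {
  T sum = 0;
  sycl::buffer<T> buf(data.data(), data.size());
  sycl::buffer<T> sum_buf(&sum, 1);
  q.submit([&](sycl::handler &h) {
    sycl::accessor buf_acc(buf, h, sycl::read_only);
    sycl::accessor sum_acc(sum_buf, h);
    h.parallel_for(data.size(), [=](auto i) {
      auto v = sycl::atomic_ref<T, sycl::memory_order::relaxed,
                                sycl::memory_scope::device,
                                sycl::access::address_space::global_space>(sum_acc[0]);
      v.fetch_add(buf_acc[i]);
    });
  });
  sycl::host_accessor h_acc(sum_buf);
  return h_acc[0];
}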
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/08_Atomic_Operations/src/atomics_local.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int N = 1024 * 1000 * 1000;
constexpr int M = 256;
int sum = 0;
int *data = static_cast<int *>(malloc(sizeof(int) * N));
for (int i = 0; i < N; i++) data[i] = 1;
sycl::queue q({sycl::property::queue::enable_profiling()});
sycl::buffer<int> buf_sum(&sum, 1);
sycl::buffer<int> buf_data(data, N);
auto e = q.submit([&](sycl::handler &h) {
sycl::accessor acc_sum(buf_sum, h);
sycl::accessor acc_data(buf_data, h, sycl::read_only);
sycl::local_accessor<int, 1> local(1, h);
h.parallel_for(sycl::nd_range<1>(N, M), [=](auto it) {
auto i = it.get_global_id(0);
sycl::atomic_ref<int, sycl::memory_order_relaxed,
sycl::memory_scope_device, sycl::access::address_space::local_space>
atomic_op(local[0]);
atomic_op = 0;
sycl::group_barrier(it.get_group());
sycl::atomic_ref<int, sycl::memory_order_relaxed,
sycl::memory_scope_device,sycl::access::address_space::global_space>
atomic_op_global(acc_sum[0]);
atomic_op += acc_data[i];
sycl::group_barrier(it.get_group());
if (it.get_local_id() == 0)
atomic_op_global += local[0];
});
});
sycl::host_accessor ha(buf_sum);
std::cout << "Reduction Sum : " << sum << "\n";
auto total_time = (e.get_profiling_info<sycl::info::event_profiling::command_end>() - e.get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9;
std::cout << "Kernel Execution Time of Local Atomics : " << total_time << " seconds\n";
return 0;
}
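//# Illustrative sketch (not the sample's method): the per-work-group partial
//# sum can also be formed with the group algorithm reduce_over_group, which
//# removes the local atomic and both explicit barriers:
void sum_with_group_reduce(sycl::queue &q, sycl::buffer<int> &buf_data,
                           sycl::buffer<int> &buf_sum, size_t n, size_t wg) {
  q.submit([&](sycl::handler &h) {
    sycl::accessor acc_data(buf_data, h, sycl::read_only);
    sycl::accessor acc_sum(buf_sum, h);
    h.parallel_for(sycl::nd_range<1>(n, wg), [=](sycl::nd_item<1> it) {
      int partial = sycl::reduce_over_group(it.get_group(),
                                            acc_data[it.get_global_id(0)],
                                            sycl::plus<>());
      if (it.get_local_id(0) == 0) {  //# one global atomic per work-group
        sycl::atomic_ref<int, sycl::memory_order_relaxed,
                         sycl::memory_scope_device,
                         sycl::access::address_space::global_space>
            atomic_op_global(acc_sum[0]);
        atomic_op_global += partial;
      }
    });
  });
}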
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/08_Atomic_Operations/lab/atomics_global.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int N = 1024 * 1000 * 1000;
constexpr int M = 256;
int sum = 0;
int *data = static_cast<int *>(malloc(sizeof(int) * N));
for (int i = 0; i < N; i++) data[i] = 1;
sycl::queue q({sycl::property::queue::enable_profiling()});
sycl::buffer<int> buf_sum(&sum, 1);
sycl::buffer<int> buf_data(data, N);
auto e = q.submit([&](sycl::handler &h) {
sycl::accessor acc_sum(buf_sum, h);
sycl::accessor acc_data(buf_data, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, M), [=](auto it) {
auto i = it.get_global_id();
sycl::atomic_ref<int, sycl::memory_order_relaxed,
sycl::memory_scope_device, sycl::access::address_space::global_space>
atomic_op(acc_sum[0]);
atomic_op += acc_data[i];
});
});
sycl::host_accessor h_a(buf_sum);
std::cout << "Reduction Sum : " << sum << "\n";
auto total_time = (e.get_profiling_info<sycl::info::event_profiling::command_end>() - e.get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9;
std::cout << "Kernel Execution Time of Global Atomics : " << total_time << "seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/08_Atomic_Operations/lab/atomics_data_type.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
constexpr size_t N = 1024 * 100;
int reductionInt(sycl::queue &q, std::vector<int> &data) {
const size_t data_size = data.size();
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<int> buf(data.data(), data_size, props);
sycl::buffer<int> sum_buf(&sum, 1, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(data_size, [=](auto index) {
size_t glob_id = index[0];
auto v = sycl::atomic_ref<
int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>(sum_acc[0]);
v.fetch_add(buf_acc[glob_id]);
});
});
q.wait();
sycl::host_accessor h_acc(sum_buf);
sum = h_acc[0];
std::cout << "ReductionInt Sum = " << sum << ", Duration " << (std::chrono::high_resolution_clock::now().time_since_epoch().count() - start) * 1e-9 << " seconds\n";
return sum;
}
float reductionFloat(sycl::queue &q, std::vector<float> &data) {
const size_t data_size = data.size();
float sum = 0.0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer<float> buf(data.data(), data_size, props);
sycl::buffer<float> sum_buf(&sum, 1, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(data_size, [=](auto index) {
size_t glob_id = index[0];
auto v = sycl::atomic_ref<
float, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>(sum_acc[0]);
v.fetch_add(buf_acc[glob_id]);
});
});
q.wait();
sycl::host_accessor h_acc(sum_buf);
sum = h_acc[0];
std::cout << "ReductionFloat Sum = " << sum << ", Duration " << (std::chrono::high_resolution_clock::now().time_since_epoch().count() - start) * 1e-9 << " seconds\n";
return sum;
}
int main(int argc, char *argv[]) {
sycl::queue q;
std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
std::vector<int> data(N, 1);
reductionInt(q, data);
}
{
std::vector<float> data(N, 1.0f);
reductionFloat(q, data);
}
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/08_Atomic_Operations/lab/atomics_local.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int N = 1024 * 1000 * 1000;
constexpr int M = 256;
int sum = 0;
int *data = static_cast<int *>(malloc(sizeof(int) * N));
for (int i = 0; i < N; i++) data[i] = 1;
sycl::queue q({sycl::property::queue::enable_profiling()});
sycl::buffer<int> buf_sum(&sum, 1);
sycl::buffer<int> buf_data(data, N);
auto e = q.submit([&](sycl::handler &h) {
sycl::accessor acc_sum(buf_sum, h);
sycl::accessor acc_data(buf_data, h, sycl::read_only);
sycl::local_accessor<int, 1> local(1, h);
h.parallel_for(sycl::nd_range<1>(N, M), [=](auto it) {
auto i = it.get_global_id(0);
sycl::atomic_ref<int, sycl::memory_order_relaxed,
sycl::memory_scope_device, sycl::access::address_space::local_space>
atomic_op(local[0]);
atomic_op = 0;
sycl::group_barrier(it.get_group());
sycl::atomic_ref<int, sycl::memory_order_relaxed,
sycl::memory_scope_device,sycl::access::address_space::global_space>
atomic_op_global(acc_sum[0]);
atomic_op += acc_data[i];
sycl::group_barrier(it.get_group());
if (it.get_local_id() == 0)
atomic_op_global += local[0];
});
});
sycl::host_accessor ha(buf_sum);
std::cout << "Reduction Sum : " << sum << "\n";
auto total_time = (e.get_profiling_info<sycl::info::event_profiling::command_end>() - e.get_profiling_info<sycl::info::event_profiling::command_start>()) * 1e-9;
std::cout << "Kernel Execution Time of Local Atomics : " << total_time << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/01_Introduction_to_GPU_Optimization/lab/hello.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main(){
// Create SYCL queue
sycl::queue q;
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
// Allocate memory
const int N = 16;
auto data = sycl::malloc_shared<int>(N, q);
for (int i = 0; i < N; i++) data[i] = i;
// Submit kernel to device
q.parallel_for(N, [=](auto i){
data[i] *= 5;
}).wait();
// Print output
for (int i=0; i<N; i++) std::cout << data[i] << " ";
std::cout << "\n";
sycl::free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/src/wg_vec_copy.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
// Copy of 32M 'one' values
constexpr size_t N = (32 * 1024 * 1024);
// Number of repetitions
constexpr int repetitions = 16;
void check_result(double elapsed, std::string msg, std::vector<int> &res) {
bool ok = true;
for (int i = 0; i < N; i++) {
if (res[i] != 1) {
ok = false;
std::cout << "ERROR: Mismatch at " << i << "\n";
}
}
if (ok)
std::cout << "SUCCESS: Time " << msg << " = " << elapsed << "s\n";
}
void vec_copy(sycl::queue &q, std::vector<int> &src, std::vector<int> &dst,
std::vector<int> &flush, int iter, int work_group_size) {
const size_t data_size = src.size();
const size_t flush_size = flush.size();
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
int num_work_items = data_size;
double elapsed = 0;
{
sycl::buffer<int> src_buf(src.data(), data_size, props);
sycl::buffer<int> dst_buf(dst.data(), data_size, props);
sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
for (int i = 0; i < iter; i++) {
// flush the cache
q.submit([&](auto &h) {
sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
});
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.submit([&](auto &h) {
sycl::accessor src_acc(src_buf, h, sycl::read_only);
sycl::accessor dst_acc(dst_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(
    sycl::nd_range<1>(num_work_items, work_group_size),
    [=](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(16)]] {
int glob_id = item.get_global_id();
dst_acc[glob_id] = src_acc[glob_id];
});
});
q.wait();
elapsed += (std::chrono::high_resolution_clock::now().time_since_epoch().count() - start) / 1e+9;
}
}
elapsed = elapsed / iter;
std::string msg = "with work-group-size=" + std::to_string(work_group_size);
check_result(elapsed, msg, dst);
} // vec_copy end
int main(int argc, char *argv[]) {
sycl::queue q;
std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> src(N, 1);
std::vector<int> dst(N, 0);
std::vector<int> extra(N, 1);
// call begin
int vec_size = 16;
int work_group_size = vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
work_group_size = 2 * vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
work_group_size = 4 * vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
work_group_size = 8 * vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
work_group_size = 16 * vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
// call end
}
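//# Illustrative sketch (assumption): instead of hard-coding vec_size = 16, the
//# candidate sizes in the sweep above can be derived from device queries:
void print_wg_limits(sycl::queue &q) {
  auto dev = q.get_device();
  auto max_wg = dev.get_info<sycl::info::device::max_work_group_size>();
  auto sg_sizes = dev.get_info<sycl::info::device::sub_group_sizes>();
  std::cout << "max work-group size: " << max_wg << "\nsub-group sizes:";
  for (auto s : sg_sizes) std::cout << " " << s;
  std::cout << "\n";
}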
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/src/convolution_global.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
std::vector<int> input(N);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = rand();
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
sycl::buffer<int> ibuf(input.data(), N);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
int t = 0;
if ((group == 0) || (group == N / gSize - 1)) {
if (i < M / 2) {
for (int j = M / 2 - i, k = 0; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
} else {
if (i + M / 2 >= N) {
for (int j = 0, k = i - M / 2; j < M / 2 + N - i;
j++, k++) {
t += iacc[k] * kacc[j];
}
} else {
for (int j = 0, k = i - M / 2; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
}
}
} else {
for (int j = 0, k = i - M / 2; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/src/convolution_global_conditionals_minmax.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
std::vector<int> input(N);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = rand();
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
sycl::buffer<int> ibuf(input.data(), N);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int t = 0;
int startj = sycl::max<int>(M / 2 - i, 0);
int endj = sycl::min<int>(M / 2 + N - i, M);
int startk = sycl::max<int>(i - M / 2, 0);
for (int j = startj, k = startk; j < endj; j++, k++) {
t += iacc[k] * kacc[j];
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/src/wg_reduction.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
// Summation of 10M 'one' values
constexpr size_t N = (10 * 1024 * 1024);
// Number of repetitions
constexpr int repetitions = 16;
// expected value of sum
int sum_expected = N;
void init_data(sycl::queue &q, sycl::buffer<int> &buf, int data_size) {
// initialize data on the device
q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
});
q.wait();
}
void check_result(double elapsed, std::string msg, int sum) {
if (sum == sum_expected)
std::cout << "SUCCESS: Time is " << elapsed << "s" << msg << "\n";
else
std::cout << "ERROR: Expected " << sum_expected << " but got " << sum
<< "\n";
}
void reduction(sycl::queue &q, std::vector<int> &data, std::vector<int> &flush,
int iter, int vec_size, int work_group_size) {
const size_t data_size = data.size();
const size_t flush_size = flush.size();
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
int num_work_items = data_size / work_group_size;
sycl::buffer<int> buf(data.data(), data_size, props);
sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
sycl::buffer<int> sum_buf(&sum, 1, props);
init_data(q, buf, data_size);
double elapsed = 0;
for (int i = 0; i < iter; i++) {
q.submit([&](auto &h) {
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(1, [=](auto index) { sum_acc[index] = 0; });
});
// flush the cache
q.submit([&](auto &h) {
sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
});
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
// reductionMapToHWVector main begin
q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::local_accessor<int, 1> scratch(work_group_size, h);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(
    sycl::nd_range<1>(num_work_items, work_group_size),
    [=](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(16)]] {
auto v = sycl::atomic_ref<
int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>(sum_acc[0]);
int sum = 0;
int glob_id = item.get_global_id();
int loc_id = item.get_local_id();
for (int i = glob_id; i < data_size; i += num_work_items)
sum += buf_acc[i];
scratch[loc_id] = sum;
for (int i = work_group_size / 2; i > 0; i >>= 1) {
sycl::group_barrier(item.get_group());
if (loc_id < i)
scratch[loc_id] += scratch[loc_id + i];
}
if (loc_id == 0)
v.fetch_add(scratch[0]);
});
});
q.wait();
elapsed += (std::chrono::high_resolution_clock::now().time_since_epoch().count() - start) / 1e+9;
sycl::host_accessor h_acc(sum_buf);
sum = h_acc[0];
}
elapsed = elapsed / iter;
std::string msg = " with work-groups=" + std::to_string(work_group_size);
check_result(elapsed, msg, sum);
}
int main(int argc, char *argv[]) {
sycl::queue q;
std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
std::vector<int> extra(N, 1);
int vec_size = 16;
int work_group_size = vec_size;
reduction(q, data, extra, 16, vec_size, work_group_size);
work_group_size =
q.get_device().get_info<sycl::info::device::max_work_group_size>();
reduction(q, data, extra, 16, vec_size, work_group_size);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/src/convolution_global_conditionals.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> input(N + M / 2 + M / 2);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = M / 2; i < N + M / 2; ++i) {
input[i] = rand();
}
for (int i = 0; i < M / 2; ++i) {
input[i] = 0;
input[i + N + M / 2] = 0;
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
{
sycl::buffer<int> ibuf(input.data(), N + M / 2 + M / 2);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int t = 0;
for (int j = 0; j < M; j++) {
t += iacc[i + j] * kacc[j];
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
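//# Illustrative sketch (assumption): a quick host-side spot check of a few
//# outputs against the padded input confirms the indexing used above; it
//# accumulates in int to mirror the kernel's arithmetic exactly:
bool spot_check(const std::vector<int> &input, const std::vector<int> &kernel,
                const std::vector<int> &output, size_t n, size_t m) {
  for (size_t i : {size_t(0), n / 2, n - 1}) {
    int t = 0;
    for (size_t j = 0; j < m; j++) t += input[i + j] * kernel[j];
    if (t != output[i]) return false;  //# mismatch at element i
  }
  return true;
}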
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/lab/wg_vec_copy.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
// Copy of 32M 'one' values
constexpr size_t N = (32 * 1024 * 1024);
// Number of repetitions
constexpr int repetitions = 16;
void check_result(double elapsed, std::string msg, std::vector<int> &res) {
bool ok = true;
for (int i = 0; i < N; i++) {
if (res[i] != 1) {
ok = false;
std::cout << "ERROR: Mismatch at " << i << "\n";
}
}
if (ok)
std::cout << "SUCCESS: Time " << msg << " = " << elapsed << "s\n";
}
void vec_copy(sycl::queue &q, std::vector<int> &src, std::vector<int> &dst,
std::vector<int> &flush, int iter, int work_group_size) {
const size_t data_size = src.size();
const size_t flush_size = flush.size();
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
int num_work_items = data_size;
double elapsed = 0;
{
sycl::buffer<int> src_buf(src.data(), data_size, props);
sycl::buffer<int> dst_buf(dst.data(), data_size, props);
sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
for (int i = 0; i < iter; i++) {
// flush the cache
q.submit([&](auto &h) {
sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
});
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
q.submit([&](auto &h) {
sycl::accessor src_acc(src_buf, h, sycl::read_only);
sycl::accessor dst_acc(dst_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(
    sycl::nd_range<1>(num_work_items, work_group_size),
    [=](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(16)]] {
int glob_id = item.get_global_id();
dst_acc[glob_id] = src_acc[glob_id];
});
});
q.wait();
elapsed += (std::chrono::high_resolution_clock::now().time_since_epoch().count() - start) / 1e+9;
}
}
elapsed = elapsed / iter;
std::string msg = "with work-group-size=" + std::to_string(work_group_size);
check_result(elapsed, msg, dst);
} // vec_copy end
int main(int argc, char *argv[]) {
sycl::queue q;
std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> src(N, 1);
std::vector<int> dst(N, 0);
std::vector<int> extra(N, 1);
// call begin
int vec_size = 16;
int work_group_size = vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
work_group_size = 2 * vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
work_group_size = 4 * vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
work_group_size = 8 * vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
work_group_size = 16 * vec_size;
vec_copy(q, src, dst, extra, 16, work_group_size);
// call end
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/lab/convolution_global.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
std::vector<int> input(N);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = rand();
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
sycl::buffer<int> ibuf(input.data(), N);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int group = it.get_group()[0];
int gSize = it.get_local_range()[0];
int t = 0;
if ((group == 0) || (group == N / gSize - 1)) {
if (i < M / 2) {
for (int j = M / 2 - i, k = 0; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
} else {
if (i + M / 2 >= N) {
for (int j = 0, k = i - M / 2; j < M / 2 + N - i;
j++, k++) {
t += iacc[k] * kacc[j];
}
} else {
for (int j = 0, k = i - M / 2; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
}
}
} else {
for (int j = 0, k = i - M / 2; j < M; j++, k++) {
t += iacc[k] * kacc[j];
}
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/lab/convolution_global_conditionals_minmax.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
std::vector<int> input(N);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = 0; i < N; ++i) {
input[i] = rand();
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
{
sycl::buffer<int> ibuf(input.data(), N);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int t = 0;
int startj = sycl::max<int>(M / 2 - i, 0);
int endj = sycl::min<int>(M / 2 + N - i, M);
int startk = sycl::max<int>(i - M / 2, 0);
for (int j = startj, k = startk; j < endj; j++, k++) {
t += iacc[k] * kacc[j];
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/lab/wg_reduction.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
// Summation of 10M 'one' values
constexpr size_t N = (10 * 1024 * 1024);
// Number of repetitions
constexpr int repetitions = 16;
// expected value of sum
int sum_expected = N;
void init_data(sycl::queue &q, sycl::buffer<int> &buf, int data_size) {
// initialize data on the device
q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(data_size, [=](auto index) { buf_acc[index] = 1; });
});
q.wait();
}
void check_result(double elapsed, std::string msg, int sum) {
if (sum == sum_expected)
std::cout << "SUCCESS: Time is " << elapsed << "s" << msg << "\n";
else
std::cout << "ERROR: Expected " << sum_expected << " but got " << sum
<< "\n";
}
void reduction(sycl::queue &q, std::vector<int> &data, std::vector<int> &flush,
int iter, int vec_size, int work_group_size) {
const size_t data_size = data.size();
const size_t flush_size = flush.size();
int sum = 0;
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
int num_work_items = data_size / work_group_size;
sycl::buffer<int> buf(data.data(), data_size, props);
sycl::buffer<int> flush_buf(flush.data(), flush_size, props);
sycl::buffer<int> sum_buf(&sum, 1, props);
init_data(q, buf, data_size);
double elapsed = 0;
for (int i = 0; i < iter; i++) {
q.submit([&](auto &h) {
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(1, [=](auto index) { sum_acc[index] = 0; });
});
// flush the cache
q.submit([&](auto &h) {
sycl::accessor flush_acc(flush_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(flush_size, [=](auto index) { flush_acc[index] = 1; });
});
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
// reductionMapToHWVector main begin
q.submit([&](auto &h) {
sycl::accessor buf_acc(buf, h, sycl::read_only);
sycl::local_accessor<int, 1> scratch(work_group_size, h);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(
    sycl::nd_range<1>(num_work_items, work_group_size),
    [=](sycl::nd_item<1> item) [[intel::reqd_sub_group_size(16)]] {
auto v = sycl::atomic_ref<
int, sycl::memory_order::relaxed,
sycl::memory_scope::device,
sycl::access::address_space::global_space>(sum_acc[0]);
int sum = 0;
int glob_id = item.get_global_id();
int loc_id = item.get_local_id();
for (int i = glob_id; i < data_size; i += num_work_items)
sum += buf_acc[i];
scratch[loc_id] = sum;
for (int i = work_group_size / 2; i > 0; i >>= 1) {
sycl::group_barrier(item.get_group());
if (loc_id < i)
scratch[loc_id] += scratch[loc_id + i];
}
if (loc_id == 0)
v.fetch_add(scratch[0]);
});
});
q.wait();
elapsed += (std::chrono::high_resolution_clock::now().time_since_epoch().count() - start) / 1e+9;
sycl::host_accessor h_acc(sum_buf);
sum = h_acc[0];
}
elapsed = elapsed / iter;
std::string msg = " with work-groups=" + std::to_string(work_group_size);
check_result(elapsed, msg, sum);
}
int main(int argc, char *argv[]) {
sycl::queue q;
std::cout << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> data(N, 1);
std::vector<int> extra(N, 1);
int vec_size = 16;
int work_group_size = vec_size;
reduction(q, data, extra, 16, vec_size, work_group_size);
work_group_size =
q.get_device().get_info<sycl::info::device::max_work_group_size>();
reduction(q, data, extra, 16, vec_size, work_group_size);
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/05_Kernel_Programming/lab/convolution_global_conditionals.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr size_t N = 8192 * 8192;
constexpr size_t M = 257;
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device: " << q.get_device().get_info<sycl::info::device::name>() << "\n";
std::vector<int> input(N + M / 2 + M / 2);
std::vector<int> output(N);
std::vector<int> kernel(M);
srand(2009);
for (int i = M / 2; i < N + M / 2; ++i) {
input[i] = rand();
}
for (int i = 0; i < M / 2; ++i) {
input[i] = 0;
input[i + N + M / 2] = 0;
}
for (int i = 0; i < M; ++i) {
kernel[i] = rand();
}
{
sycl::buffer<int> ibuf(input.data(), N + M / 2 + M / 2);
sycl::buffer<int> obuf(output.data(), N);
sycl::buffer<int> kbuf(kernel.data(), M);
auto e = q.submit([&](auto &h) {
sycl::accessor iacc(ibuf, h, sycl::read_only);
sycl::accessor oacc(obuf, h);
sycl::accessor kacc(kbuf, h, sycl::read_only);
h.parallel_for(sycl::nd_range<1>(N, 256), [=](sycl::nd_item<1> it) {
int i = it.get_global_linear_id();
int t = 0;
for (int j = 0; j < M; j++) {
t += iacc[i + j] * kacc[j];
}
oacc[i] = t;
});
});
q.wait();
size_t kernel_ns = (e.template get_profiling_info<sycl::info::event_profiling::command_end>() - e.template get_profiling_info<sycl::info::event_profiling::command_start>());
std::cout << "Kernel Execution Time Average: total = " << kernel_ns * 1e-6 << " msec\n";
}
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/usm_device.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# initialize data on host
constexpr int N = 16;
int host_data[N];
for (int i = 0; i < N; i++) host_data[i] = 10;
//# Explicit USM allocation using malloc_device
int *device_data = sycl::malloc_device<int>(N, q);
//# copy mem from host to device
q.memcpy(device_data, host_data, sizeof(int) * N).wait();
//# update device memory
q.parallel_for(N, [=](auto i) { device_data[i] += 1; }).wait();
//# copy mem from device to host
q.memcpy(host_data, device_data, sizeof(int) * N).wait();
//# print output
for (int i = 0; i < N; i++) std::cout << host_data[i] << " ";
std::cout << "\n";
sycl::free(device_data, q);
return 0;
}
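//# Illustrative sketch (not the sample's method): with shared USM the explicit
//# memcpy calls disappear, at the cost of implicit page migration between host
//# and device:
void update_with_shared_usm(sycl::queue &q, int n) {
  int *data = sycl::malloc_shared<int>(n, q);
  for (int i = 0; i < n; i++) data[i] = 10;  //# host writes directly
  q.parallel_for(n, [=](auto i) { data[i] += 1; }).wait();
  for (int i = 0; i < n; i++) std::cout << data[i] << " ";
  std::cout << "\n";
  sycl::free(data, q);
}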
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/buffer_mem_move_1.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int num_items = 1024*1000*1000;
std::vector<int> a(num_items);
for(int i=0;i<num_items;i++) a[i] = i;
std::vector<int> b(num_items, 1);
std::vector<int> c(num_items, 2);
std::vector<int> d(num_items, 3);
std::vector<int> sum(num_items, 0);
std::vector<int> res(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer c_buf(c, props);
sycl::buffer d_buf(d, props);
sycl::buffer sum_buf(sum, props);
sycl::buffer res_buf(res, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# Kernel 1
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
});
//# Kernel 3
q.submit([&](auto &h) {
sycl::accessor sum_acc(sum_buf, h, sycl::read_write);
h.parallel_for(num_items, [=](auto i) {
if (sum_acc[i] > 10)
sum_acc[i] = 1;
else
sum_acc[i] = 0;
});
});
//# Kernel 2
q.submit([&](auto &h) {
sycl::accessor c_acc(c_buf, h, sycl::read_only);
sycl::accessor d_acc(d_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::read_only);
sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { res_acc[i] = sum_acc[i] * c_acc[i] + d_acc[i]; });
}).wait();
sycl::host_accessor h_acc(res_buf);
for (int i = 0; i < 20; i++) std::cout << h_acc[i] << " ";
std::cout << "...\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/usm_memcpy.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 1024000000;
//# host allocation using malloc
auto host_data = static_cast<int *>(malloc(N * sizeof(int)));
//# USM host allocation using malloc_host
auto host_data_usm = sycl::malloc_host<int>(N, q);
//# USM device allocation using malloc_device
auto device_data_usm = sycl::malloc_device<int>(N, q);
//# copy mem from host (malloc) to device
auto e1 = q.memcpy(device_data_usm, host_data, sizeof(int) * N);
//# copy mem from host (malloc_host) to device
auto e2 = q.memcpy(device_data_usm, host_data_usm, sizeof(int) * N);
q.wait();
//# free allocations
sycl::free(device_data_usm, q);
sycl::free(host_data_usm, q);
free(host_data);
std::cout << "memcpy Time (malloc-to-malloc_device) : " << (e1.template get_profiling_info<sycl::info::event_profiling::command_end>() - e1.template get_profiling_info<sycl::info::event_profiling::command_start>()) / 1e+9 << " seconds\n";
std::cout << "memcpy Time (malloc_host-to-malloc_device : " << (e2.template get_profiling_info<sycl::info::event_profiling::command_end>() - e2.template get_profiling_info<sycl::info::event_profiling::command_start>()) / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/buffer_mem_move_2.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int num_items = 1024*1000*1000;
std::vector<int> a(num_items);
for(int i=0;i<num_items;i++) a[i] = i;
std::vector<int> b(num_items, 1);
std::vector<int> c(num_items, 2);
std::vector<int> d(num_items, 3);
std::vector<int> sum(num_items, 0);
std::vector<int> res(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer c_buf(c, props);
sycl::buffer d_buf(d, props);
sycl::buffer sum_buf(sum, props);
sycl::buffer res_buf(res, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# Kernel 1
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) {
int t = a_acc[i] + b_acc[i];
if (t > 10)
sum_acc[i] = 1;
else
sum_acc[i] = 0;
});
});
//# Kernel 2
q.submit([&](auto &h) {
sycl::accessor c_acc(c_buf, h, sycl::read_only);
sycl::accessor d_acc(d_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::read_only);
sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { res_acc[i] = sum_acc[i] * c_acc[i] + d_acc[i]; });
}).wait();
sycl::host_accessor h_acc(res_buf);
for (int i = 0; i < 20; i++) std::cout << h_acc[i] << " ";
std::cout << "...\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/buffer_loop.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
constexpr int N = 16;
constexpr int STEPS = 10000;
int main() {
std::vector<int> a(N, 1);
std::vector<int> b(N, 2);
std::vector<int> c(N);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
sycl::queue q;
sycl::buffer<int> a_buf(a);
sycl::buffer<int> b_buf(b);
for (int j = 0; j < STEPS; j++) {
//# Buffer c in the loop
sycl::buffer<int> c_buf(c);
q.submit([&](auto &h) {
// Create device accessors.
sycl::accessor a_acc(a_buf, h);
sycl::accessor b_acc(b_buf, h);
sycl::accessor c_acc(c_buf, h, sycl::no_init);
h.parallel_for(N, [=](auto i) {
c_acc[i] = (a_acc[i] < b_acc[i]) ? -1 : 1;
a_acc[i] += c_acc[i];
b_acc[i] -= c_acc[i];
});
});
}
// Create host accessors.
const sycl::host_accessor ha(a_buf);
const sycl::host_accessor hb(b_buf);
printf("%d %d\n", ha[N / 2], hb[N / 2]);
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
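//# Illustrative sketch (assumption): hoisting c_buf out of the loop avoids a
//# host write-back on every iteration, since the buffer is destroyed (and
//# synchronized) only once:
void iterate_with_hoisted_buffer(sycl::queue &q, std::vector<int> &a,
                                 std::vector<int> &b, std::vector<int> &c,
                                 int steps) {
  sycl::buffer<int> a_buf(a);
  sycl::buffer<int> b_buf(b);
  sycl::buffer<int> c_buf(c);  //# created once for all iterations
  for (int j = 0; j < steps; j++) {
    q.submit([&](auto &h) {
      sycl::accessor a_acc(a_buf, h);
      sycl::accessor b_acc(b_buf, h);
      sycl::accessor c_acc(c_buf, h, sycl::no_init);
      h.parallel_for(a.size(), [=](auto i) {
        c_acc[i] = (a_acc[i] < b_acc[i]) ? -1 : 1;
        a_acc[i] += c_acc[i];
        b_acc[i] -= c_acc[i];
      });
    });
  }
}  //# buffers write back to a, b, c here, once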
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/buffers.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int N = 16;
std::vector<int> host_data(N, 10);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# Modify data array on device
sycl::buffer buffer_data(host_data);
q.submit([&](sycl::handler& h) {
sycl::accessor device_data(buffer_data, h);
h.parallel_for(N, [=](auto i) { device_data[i] += 1; });
});
sycl::host_accessor ha(buffer_data, sycl::read_only);
//# print output
  for (int i = 0; i < N; i++) std::cout << ha[i] << " ";
  std::cout << "\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/buffer_host_ptr.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
constexpr int num_items = 16;
constexpr int iter = 1;
std::vector<int> a(num_items, 10);
std::vector<int> b(num_items, 10);
std::vector<int> sum(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer sum_buf(sum, props);
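  //# use_host_ptr directs the runtime to reuse the host allocation for the buffer instead of creating a separate copy, so the host addresses printed below match.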
{
sycl::host_accessor a_host_acc(a_buf);
std::cout << "address of vector a = " << a.data() << "\n";
std::cout << "buffer memory address = " << a_host_acc.get_pointer() << "\n";
}
q.submit([&](auto &h) {
// Input accessors
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
// Output accessor
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
sycl::stream out(1024 * 1024, 1 * 128, h);
h.parallel_for(num_items, [=](auto i) {
if (i[0] == 0)
out << "device accessor address = " << a_acc.get_pointer() << "\n";
sum_acc[i] = a_acc[i] + b_acc[i];
});
}).wait();
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/buffer_mem_move_0.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
int main() {
constexpr int num_items = 1024*1000*1000;
std::vector<int> a(num_items);
for(int i=0;i<num_items;i++) a[i] = i;
std::vector<int> b(num_items, 1);
std::vector<int> c(num_items, 2);
std::vector<int> d(num_items, 3);
std::vector<int> sum(num_items, 0);
std::vector<int> res(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer c_buf(c, props);
sycl::buffer d_buf(d, props);
sycl::buffer sum_buf(sum, props);
sycl::buffer res_buf(res, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# Kernel 1
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
});
{
sycl::host_accessor h_acc(sum_buf);
for (int j = 0; j < num_items; j++)
if (h_acc[j] > 10)
h_acc[j] = 1;
else
h_acc[j] = 0;
}
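  //# The host_accessor blocks until Kernel 1 finishes and moves sum_buf to the host; Kernel 2 must then wait for the data to move back, serializing the pipeline.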
//# Kernel 2
q.submit([&](auto &h) {
sycl::accessor c_acc(c_buf, h, sycl::read_only);
sycl::accessor d_acc(d_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::read_only);
sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { res_acc[i] = sum_acc[i] * c_acc[i] + d_acc[i]; });
}).wait();
sycl::host_accessor h_acc(res_buf);
  for (int i = 0; i < 20; i++) std::cout << h_acc[i] << " ";
  std::cout << "...\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/buffer_access_modes.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
constexpr int N = 1024000000;
int main() {
std::vector<int> a(N, 1);
std::vector<int> b(N, 2);
std::vector<int> c(N);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
sycl::queue q;
{
sycl::buffer<int> a_buf(a);
sycl::buffer<int> b_buf(b);
sycl::buffer<int> c_buf(c);
q.submit([&](auto &h) {
// Create device accessors.
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor c_acc(c_buf, h, sycl::write_only, sycl::no_init);
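      // read_only accessors avoid copying a and b back to the host; write_only + no_init avoids copying c to the device.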
h.parallel_for(N, [=](auto i) {
c_acc[i] = a_acc[i] + b_acc[i];
});
});
}
std::cout << "C = " << c[N/2] << "\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/usm_copy_partial.cpp
|
//==============================================================
// Copyright © 2020 Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
#include <cstdlib>
static constexpr size_t N = 102400000; // global size
int main() {
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# setup queue with default selector
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# initialize data array using usm
int *data = static_cast<int *>(malloc(N * sizeof(int)));
for (int i = 0; i < N; i++) data[i] = 1;
//# USM device allocation
auto device_data = sycl::malloc_device<int>(N, q);
//# copy mem from host to device
q.memcpy(device_data, data, sizeof(int) * N).wait();
//# single_task kernel performing simple addition of all elements
q.single_task([=](){
int sum = 0;
for(int i=0;i<N;i++){
sum += device_data[i];
}
device_data[0] = sum;
}).wait();
//# copy mem from device to host
q.memcpy(data, device_data, sizeof(int) * N).wait();
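  //# Note: only data[0] is consumed below, so copying just sizeof(int) back would avoid transferring the full array.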
std::cout << "Sum = " << data[0] << "\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
sycl::free(device_data, q);
free(data);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/usm_overlap_copy.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
#define NITERS 10
#define KERNEL_ITERS 10000
#define NUM_CHUNKS 10
#define CHUNK_SIZE 10000000
int main() {
const int num_chunks = NUM_CHUNKS;
const int chunk_size = CHUNK_SIZE;
const int iter = NITERS;
sycl::queue q;
//# Allocate and initialize host data
float *host_data[num_chunks];
for (int c = 0; c < num_chunks; c++) {
host_data[c] = sycl::malloc_host<float>(chunk_size, q);
float val = c;
for (int i = 0; i < chunk_size; i++)
host_data[c][i] = val;
}
std::cout << "Allocated host data\n";
//# Allocate and initialize device memory
float *device_data[num_chunks];
for (int c = 0; c < num_chunks; c++) {
device_data[c] = sycl::malloc_device<float>(chunk_size, q);
float val = 1000.0;
q.fill<float>(device_data[c], val, chunk_size);
}
q.wait();
std::cout << "Allocated device data\n";
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
for (int it = 0; it < iter; it++) {
for (int c = 0; c < num_chunks; c++) {
//# Copy-in not dependent on previous event
auto copy_in_event = q.memcpy(device_data[c], host_data[c], sizeof(float) * chunk_size);
//# Compute waits for copy_in_event
auto compute_event = q.parallel_for(chunk_size, copy_in_event, [=](auto id) {
for (int i = 0; i < KERNEL_ITERS; i++) device_data[c][id] += 1.0;
});
//# Copy out waits for compute_event
auto copy_out_event = q.memcpy(host_data[c], device_data[c], sizeof(float) * chunk_size, compute_event);
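      //# Events only order work within a chunk, so the copy-in for the next chunk can overlap this chunk's compute and copy-out.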
}
q.wait();
}
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
for (int c = 0; c < num_chunks; c++) {
for (int i = 0; i < chunk_size; i++) {
if (host_data[c][i] != (float)((c + KERNEL_ITERS * iter))) {
std::cout << "Mismatch for chunk: " << c << " position: " << i
<< " expected: " << c + 10000 << " got: " << host_data[c][i]
<< "\n";
break;
}
}
}
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/usm_shared.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# USM allocation using malloc_shared
constexpr int N = 16;
int *data = sycl::malloc_shared<int>(N, q);
//# Initialize data array
for (int i = 0; i < N; i++) data[i] = 10;
//# Modify data array on device
q.parallel_for(N, [=](auto i) { data[i] += 1; }).wait();
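  //# Shared allocations migrate between host and device on demand, so no explicit memcpy calls are needed.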
//# print output
  for (int i = 0; i < N; i++) std::cout << data[i] << " ";
  std::cout << "\n";
sycl::free(data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/buffer_mem_move_3.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
int main() {
constexpr int num_items = 1024*1000*1000;
std::vector<int> a(num_items);
for(int i=0;i<num_items;i++) a[i] = i;
std::vector<int> b(num_items, 1);
std::vector<int> c(num_items, 2);
std::vector<int> d(num_items, 3);
std::vector<int> res(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer c_buf(c, props);
sycl::buffer d_buf(d, props);
sycl::buffer res_buf(res, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
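  //# All three stages are fused into a single kernel, eliminating the intermediate sum buffer and its data movement.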
//# Kernel 1
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor c_acc(c_buf, h, sycl::read_only);
sycl::accessor d_acc(d_buf, h, sycl::read_only);
sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) {
int t = a_acc[i] + b_acc[i];
if (t > 10)
res_acc[i] = c_acc[i] + d_acc[i] ;
else
res_acc[i] = d_acc[i];
});
}).wait();
sycl::host_accessor h_acc(res_buf);
  for (int i = 0; i < 20; i++) std::cout << h_acc[i] << " ";
  std::cout << "...\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/src/align.hpp
|
#ifndef __ALIGN
#define __ALIGN 1
#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <memory>
#include <new>
#include <type_traits>
#include <utility>
enum class Alignment : size_t {
Normal = sizeof(void *),
SSE = 16,
AVX = 32,
PAGE = 4096,
};
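// detail:: provides the raw aligned allocation; the AlignedAllocator templates below wrap it in an STL-compatible allocator interface.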
namespace detail {
void *allocate_aligned_memory(size_t align, size_t size);
void deallocate_aligned_memory(void *ptr) noexcept;
} // namespace detail
template <typename T, Alignment Align = Alignment::PAGE> class AlignedAllocator;
template <Alignment Align> class AlignedAllocator<void, Align> {
public:
typedef void *pointer;
typedef const void *const_pointer;
typedef void value_type;
template <class U> struct rebind {
typedef AlignedAllocator<U, Align> other;
};
};
template <typename T, Alignment Align> class AlignedAllocator {
public:
typedef T value_type;
typedef T *pointer;
typedef const T *const_pointer;
typedef T &reference;
typedef const T &const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef std::true_type propagate_on_container_move_assignment;
template <class U> struct rebind {
typedef AlignedAllocator<U, Align> other;
};
public:
AlignedAllocator() noexcept {}
template <class U>
AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {}
size_type max_size() const noexcept {
return (size_type(~0) - size_type(Align)) / sizeof(T);
}
pointer address(reference x) const noexcept { return std::addressof(x); }
const_pointer address(const_reference x) const noexcept {
return std::addressof(x);
}
pointer allocate(size_type n,
typename AlignedAllocator<void, Align>::const_pointer = 0) {
const size_type alignment = static_cast<size_type>(Align);
void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T));
if (ptr == nullptr) {
throw std::bad_alloc();
}
return reinterpret_cast<pointer>(ptr);
}
void deallocate(pointer p, size_type) noexcept {
return detail::deallocate_aligned_memory(p);
}
template <class U, class... Args> void construct(U *p, Args &&... args) {
::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...);
}
void destroy(pointer p) { p->~T(); }
};
template <typename T, Alignment Align> class AlignedAllocator<const T, Align> {
public:
typedef T value_type;
typedef const T *pointer;
typedef const T *const_pointer;
typedef const T &reference;
typedef const T &const_reference;
typedef size_t size_type;
typedef ptrdiff_t difference_type;
typedef std::true_type propagate_on_container_move_assignment;
template <class U> struct rebind {
typedef AlignedAllocator<U, Align> other;
};
public:
AlignedAllocator() noexcept {}
template <class U>
AlignedAllocator(const AlignedAllocator<U, Align> &) noexcept {}
size_type max_size() const noexcept {
return (size_type(~0) - size_type(Align)) / sizeof(T);
}
const_pointer address(const_reference x) const noexcept {
return std::addressof(x);
}
pointer allocate(size_type n,
typename AlignedAllocator<void, Align>::const_pointer = 0) {
const size_type alignment = static_cast<size_type>(Align);
void *ptr = detail::allocate_aligned_memory(alignment, n * sizeof(T));
if (ptr == nullptr) {
throw std::bad_alloc();
}
return reinterpret_cast<pointer>(ptr);
}
void deallocate(pointer p, size_type) noexcept {
return detail::deallocate_aligned_memory(p);
}
template <class U, class... Args> void construct(U *p, Args &&... args) {
::new (reinterpret_cast<void *>(p)) U(std::forward<Args>(args)...);
}
void destroy(pointer p) { p->~T(); }
};
template <typename T, Alignment TAlign, typename U, Alignment UAlign>
inline bool operator==(const AlignedAllocator<T, TAlign> &,
const AlignedAllocator<U, UAlign> &) noexcept {
return TAlign == UAlign;
}
template <typename T, Alignment TAlign, typename U, Alignment UAlign>
inline bool operator!=(const AlignedAllocator<T, TAlign> &,
const AlignedAllocator<U, UAlign> &) noexcept {
return TAlign != UAlign;
}
void *detail::allocate_aligned_memory(size_t align, size_t size) {
assert(align >= sizeof(void *));
  assert((align & (align - 1)) == 0 && "alignment must be a power of two");
if (size == 0) {
return nullptr;
}
void *ptr = nullptr;
int rc = posix_memalign(&ptr, align, size);
if (rc != 0) {
return nullptr;
}
return ptr;
}
void detail::deallocate_aligned_memory(void *ptr) noexcept { return free(ptr); }
#endif
|
hpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/usm_device.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
int main() {
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
//# initialize data on host
constexpr int N = 16;
int host_data[N];
for (int i = 0; i < N; i++) host_data[i] = 10;
//# Explicit USM allocation using malloc_device
int *device_data = sycl::malloc_device<int>(N, q);
//# copy mem from host to device
q.memcpy(device_data, host_data, sizeof(int) * N).wait();
//# update device memory
q.parallel_for(N, [=](auto i) { device_data[i] += 1; }).wait();
//# copy mem from device to host
q.memcpy(host_data, device_data, sizeof(int) * N).wait();
//# print output
  for (int i = 0; i < N; i++) std::cout << host_data[i] << " ";
  std::cout << "\n";
sycl::free(device_data, q);
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/buffer_mem_move_1.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <chrono>
int main() {
constexpr int num_items = 1024*1000*1000;
std::vector<int> a(num_items);
for(int i=0;i<num_items;i++) a[i] = i;
std::vector<int> b(num_items, 1);
std::vector<int> c(num_items, 2);
std::vector<int> d(num_items, 3);
std::vector<int> sum(num_items, 0);
std::vector<int> res(num_items, 0);
sycl::queue q;
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
const sycl::property_list props = {sycl::property::buffer::use_host_ptr()};
sycl::buffer a_buf(a, props);
sycl::buffer b_buf(b, props);
sycl::buffer c_buf(c, props);
sycl::buffer d_buf(d, props);
sycl::buffer sum_buf(sum, props);
sycl::buffer res_buf(res, props);
auto start = std::chrono::high_resolution_clock::now().time_since_epoch().count();
//# Kernel 1
q.submit([&](auto &h) {
sycl::accessor a_acc(a_buf, h, sycl::read_only);
sycl::accessor b_acc(b_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { sum_acc[i] = a_acc[i] + b_acc[i]; });
});
//# Kernel 3
q.submit([&](auto &h) {
sycl::accessor sum_acc(sum_buf, h, sycl::read_write);
h.parallel_for(num_items, [=](auto i) {
if (sum_acc[i] > 10)
sum_acc[i] = 1;
else
sum_acc[i] = 0;
});
});
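  //# Kernel 3 applies the conditional on the device, so sum_buf never round-trips through the host before Kernel 2 consumes it.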
//# Kernel 2
q.submit([&](auto &h) {
sycl::accessor c_acc(c_buf, h, sycl::read_only);
sycl::accessor d_acc(d_buf, h, sycl::read_only);
sycl::accessor sum_acc(sum_buf, h, sycl::read_only);
sycl::accessor res_acc(res_buf, h, sycl::write_only, sycl::no_init);
h.parallel_for(num_items, [=](auto i) { res_acc[i] = sum_acc[i] * c_acc[i] + d_acc[i]; });
}).wait();
sycl::host_accessor h_acc(res_buf);
  for (int i = 0; i < 20; i++) std::cout << h_acc[i] << " ";
  std::cout << "...\n";
auto duration = std::chrono::high_resolution_clock::now().time_since_epoch().count() - start;
std::cout << "Compute Duration: " << duration / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|
oneAPI-samples
|
data/projects/oneAPI-samples/DirectProgramming/C++SYCL/Jupyter/gpu-optimization-sycl-training/03_Memory_Optimization/lab/usm_memcpy.cpp
|
//==============================================================
// Copyright © Intel Corporation
//
// SPDX-License-Identifier: MIT
// =============================================================
#include <sycl/sycl.hpp>
#include <cstdlib>
int main() {
sycl::queue q{sycl::property::queue::enable_profiling{}};
std::cout << "Device : " << q.get_device().get_info<sycl::info::device::name>() << "\n";
constexpr int N = 1024000000;
//# host allocation using malloc
auto host_data = static_cast<int *>(malloc(N * sizeof(int)));
//# USM host allocation using malloc_host
auto host_data_usm = sycl::malloc_host<int>(N, q);
//# USM device allocation using malloc_device
auto device_data_usm = sycl::malloc_device<int>(N, q);
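  //# malloc_host memory is pinned (page-locked), letting the device DMA from it directly; pageable malloc memory typically needs an extra staging copy, which the timings below should reflect.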
//# copy mem from host (malloc) to device
auto e1 = q.memcpy(device_data_usm, host_data, sizeof(int) * N);
//# copy mem from host (malloc_host) to device
auto e2 = q.memcpy(device_data_usm, host_data_usm, sizeof(int) * N);
q.wait();
//# free allocations
sycl::free(device_data_usm, q);
sycl::free(host_data_usm, q);
free(host_data);
std::cout << "memcpy Time (malloc-to-malloc_device) : " << (e1.template get_profiling_info<sycl::info::event_profiling::command_end>() - e1.template get_profiling_info<sycl::info::event_profiling::command_start>()) / 1e+9 << " seconds\n";
std::cout << "memcpy Time (malloc_host-to-malloc_device : " << (e2.template get_profiling_info<sycl::info::event_profiling::command_end>() - e2.template get_profiling_info<sycl::info::event_profiling::command_start>()) / 1e+9 << " seconds\n";
return 0;
}
|
cpp
|